@comment{Publications}

@article{DCKP16JAR,
author = {Dross, Claire and Conchon, Sylvain and Kanig, Johannes and Paskevich, Andrei},
title = {Adding Decision Procedures to {SMT} Solvers using Axioms with Triggers},
journal = {Journal of Automated Reasoning},
publisher = {Springer},
year = 2016,
volume = 56,
number = 4,
pages = {387--457},
index = {956},
pdf = {papers/jar-16.pdf},
abstract = {
SMT solvers are efficient tools to decide the
satisfiability of ground formulas, including a
number of built-in theories such as congruence,
linear arithmetic, arrays, and bit-vectors. Adding
a theory to that list requires delving into the
implementation details of a given SMT solver, and is
done mainly by the developers of the solver
itself. For many useful theories, one can
alternatively provide a first-order
axiomatization. However, in the presence of
quantifiers, SMT solvers are incomplete and exhibit
unpredictable behavior. Consequently, this approach
can not provide us with a complete and terminating
treatment of the theory of interest. In this paper,
we propose a framework to solve this problem, based
on the notion of instantiation patterns, also known
as triggers. Triggers are annotations that suggest
instances which are more likely to be useful in
proof search. They are implemented in all SMT
solvers that handle first-order logic and are
included in the SMT-LIB format. In our framework,
the user provides a theory axiomatization with
triggers, along with a proof of completeness and
termination properties of this axiomatization, and
obtains a sound, complete, and terminating solver
for her theory in return. We describe and prove a
corresponding extension of the traditional Abstract
DPLL Modulo Theory framework. Implementing this
mechanism in a given SMT solver requires a one-time
development effort. We believe that this effort is
not greater than that of adding a single decision
procedure to the same SMT solver. We have
implemented the proposed extension in the Alt-Ergo
prover and we discuss some implementation details in
the paper. To show that our framework can handle
complex theories, we prove completeness and
termination of a feature-rich axiomatization of
doubly-linked lists. Our tests show that our
approach results in a better performance of the
solver on goals that stem from the verification of
programs manipulating doubly-linked lists.
}
}

@inproceedings{cfp15vstte,
  author    = {Martin Clochard and Jean-Christophe Filliâtre and Andrei Paskevich},
  title     = {How to avoid proving the absence of integer overflows},
  editor    = {Arie Gurfinkel and Sanjit A. Seshia},
  booktitle = {VSTTE 2015, 7th Working Conference on Verified Software: Theories, Tools and Experiments},
  month     = jul,
  year      = 2015,
  index     = {957},
  pdf       = {papers/vstte-15.pdf},
  abstract  = {
When proving safety of programs, we must show, in particular,
the absence of integer overflows. Unfortunately, there are lots
of situations where performing such a proof is extremely difficult,
because the appropriate restrictions on function arguments are
invasive and may be hard to infer. Yet, in certain cases, we can
relax the desired property and only require the absence of overflow
during the first n steps of execution, n being large enough for all
practical purposes. It turns out that this relaxed property can be
easily ensured for large classes of algorithms, so that only a minimal
amount of proof is needed, if at all. The idea is to restrict the set
of allowed arithmetic operations on the integer values in question,
imposing a "speed limit" on their growth. For example, if we repeatedly
increment a 64-bit integer, starting from zero, then we will need at
least 2 to the power of 64 steps to reach an overflow; on current hardware,
this takes several hundred years. When we do not expect any single
execution of our program to run that long, we have effectively proved
its safety against overflows of all variables with controlled growth
speed. In this paper, we give a formal explanation of this approach,
prove its soundness, and show how it is implemented in the context
of deductive verification.}
}

@inproceedings{filliatre14cav,
author = {Jean-Christophe Filliâtre and Léon Gondelman and Andrei Paskevich},
title = {The Spirit of Ghost Code},
editor = {Armin Biere and Roderick Bloem},
booktitle = {CAV 2014, 26th International Conference on Computer Aided Verification},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {8559},
pages = {1--16},
month = jul,
year = 2014,
index = {958},
pdf = {papers/cav-14.pdf},
abstract = {
In the context of deductive program verification, ghost code is part
of the program that is added for the purpose of specification.
Ghost code must not interfere with regular code, in the sense that
it can be erased without any observable difference in the program outcome.
In particular, ghost data cannot participate in regular
computations and ghost code cannot mutate regular data or diverge.
The idea exists in the folklore since the early notion of auxiliary
variables and is implemented in many state-of-the-art program
verification tools. However, a rigorous definition and treatment of
ghost code is surprisingly subtle and few formalizations exist.

In this article, we describe a simple ML-style programming language
with mutable state and ghost code.  Non-interference is ensured by a
type system with effects, which allows, notably, the same data types
and functions to be used in both regular and ghost code.
We define the procedure of ghost code erasure and we prove its
safety using bisimulation.
A similar type system, with numerous extensions which we briefly discuss,
is implemented in the program verification environment Why3.}
}

@inproceedings{cfmp14vstte,
  author    = {Martin Clochard and Jean-Christophe Filliâtre and Claude Marché and Andrei Paskevich},
  title     = {Formalizing Semantics with an Automatic Program Verifier},
  editor    = {Dimitra Giannakopoulou and Daniel Kroening},
  booktitle = {VSTTE 2014, 6th International Conference on Verified Software: Theories, Tools, Experiments},
  series    = {Lecture Notes in Computer Science},
  publisher = {Springer},
  month     = jul,
  year      = 2014,
  index     = {959},
  pdf       = {papers/vstte-14.pdf},
  abstract  = {
A common belief is that formalizing semantics of programming
languages requires the use of a proof assistant providing
(1) a specification language with advanced features such as
higher-order logic, inductive definitions, type polymorphism,
and (2) a
corresponding proof environment where higher-order and inductive
reasoning can be performed, typically with user interaction.

In this paper we show that such a formalization is nowadays possible
inside a mostly-automatic program verification environment. We
substantiate this claim by formalizing several semantics for a
simple language, and proving their equivalence, inside the Why3
environment.}
}

@inproceedings{clochard14plpv,
author = {Martin Clochard and Claude Marché and Andrei Paskevich},
title = {Verified Programs with Binders},
booktitle = {PLPV 2014, ACM SIGPLAN Workshop on Programming Languages meets Program Verification},
year = 2014,
pages = {29--40},
publisher = {ACM Press},
index = {960},
pdf = {papers/plpv-14.pdf},
abstract = {
Programs that treat datatypes with binders, such as theorem provers or higher-order compilers, are regularly used for mission-critical purposes, and must be both reliable and performant. Formally proving such programs using as much automation as possible is highly desirable. In this paper, we propose a generic approach to handle datatypes with binders both in the program and its specification in a way that facilitates automated reasoning about such datatypes and also leads to a reasonably efficient code. Our method is implemented in the Why3 environment for program verification. We validate it on the examples of a lambda-interpreter with several reduction strategies and a simple tableaux-based theorem prover.}
}

@article{bobot14sttt,
author = {François Bobot and Jean-Christophe Filliâtre and Claude Marché and Andrei Paskevich},
title = {Let's Verify This with {Why3}},
journal = {International Journal on Software Tools for Technology Transfer},
doi = {10.1007/s10009-014-0314-5},
url = {http://dx.doi.org/10.1007/s10009-014-0314-5},
year = 2014,
publisher = {Springer},
index = {961},
pdf = {papers/sttt-14.pdf},
abstract = {
We present solutions to the three challenges of the VerifyThis competition held at the 18th FM symposium in August 2012. These solutions use the Why3 environment for deductive program verification.}
}

@inproceedings{paskevich13cade,
author = {Jasmin C. Blanchette and Andrei Paskevich},
title = {{TFF1}: The {TPTP} typed first-order form with rank-1 polymorphism},
pages = {414--420},
booktitle = {CADE 2013, 24th International Conference on Automated Deduction},
month = jun,
year = 2013,
series = {Lecture Notes in Computer Science},
editor = {Maria Paola Bonacina},
volume = {7898},
publisher = {Springer},
index = {962},
abstract = {
The TPTP World is a well-established infrastructure for automatic theorem provers. It defines several concrete syntaxes, notably an untyped first-order form (FOF) and a typed first-order form (TFF0), that have become de facto standards. This paper introduces the TFF1 format, an extension of TFF0 with rank-1 polymorphism. The format is designed to be easy to process by existing reasoning tools that support ML-style polymorphism. It opens the door to useful middleware, such as monomorphizers and other translation tools that encode polymorphism in FOF or TFF0. Ultimately, the hope is that TFF1 will be implemented in popular automatic theorem provers.}
}

@inproceedings{bobot13vstte,
author = {François Bobot and Jean-Christophe Filliâtre and Claude Marché and Guillaume Melquiond and Andrei Paskevich},
title = {Preserving User Proofs Across Specification Changes},
pages = {191--201},
booktitle = {VSTTE 2013, 5th International Conference on Verified Software: Theories, Tools, Experiments},
month = may,
year = 2013,
editor = {Ernie Cohen and Andrey Rybalchenko},
series = {Lecture Notes in Computer Science},
volume = {8164},
publisher = {Springer},
index = {963},
pdf = {papers/vstte-13.pdf},
abstract = {
In the context of deductive program verification, both the specification and the code evolve as the verification process carries on. For instance, a loop invariant gets strengthened when additional properties are added to the specification. This causes all the related proof obligations to change; thus previous user verifications become invalid. Yet it is often the case that most of previous proof attempts (goal transformations, calls to interactive or automated provers) are still directly applicable or are easy to adjust. In this paper, we describe a technique to maintain a proof session against modification of verification conditions. This technique is implemented in the Why3 platform. It was successfully used in developing more than a hundred verified programs and in keeping them up to date along the evolution of Why3 and its standard library. It also helps out with changes in the environment, e.g. prover upgrades.}
}

@inproceedings{filliatre13esop,
author = {Jean-Christophe Filliâtre and Andrei Paskevich},
title = {{Why3} --- Where Programs Meet Provers},
booktitle = {ESOP 2013, Proceedings of the 22nd European Symposium on Programming},
month = mar,
year = {2013},
volume = {7792},
pages = {125--128},
series = {Lecture Notes in Computer Science},
editor = {Matthias Felleisen and Philippa Gardner},
publisher = {Springer},
index = {964},
pdf = {papers/esop-13.pdf},
abstract = {
We present Why3, a tool for deductive program verification, and WhyML,
its programming and specification language. WhyML is a first-order language with
polymorphic types, pattern matching, and inductive predicates. Programs can
make use of record types with mutable fields, type invariants, and ghost code.
Verification conditions are discharged by Why3 with the help of various existing
automated and interactive theorem provers. To keep verification conditions
tractable and comprehensible, WhyML imposes a static control of aliases that
obviates the use of a memory model. A user can write WhyML programs directly
and get correct-by-construction OCaml programs via an automated extraction
mechanism. WhyML is also used as an intermediate language for the verification
of C, Java, or Ada programs. We demonstrate the benefits of Why3 and WhyML on
non-trivial examples of program verification.
}
}

@inproceedings{dross12smt,
title = {Reasoning with Triggers},
author = {Dross, Claire and Conchon, Sylvain and Kanig, Johannes and Paskevich, Andrei},
booktitle = {SMT 2012, 10th International Workshop on Satisfiability Modulo Theories},
editor = {Pascal Fontaine and Amit Goel},
month = jun,
year = {2012},
pages = {22--31},
index = {966},
pdf = {papers/smt-12.pdf},
abstract = {
SMT solvers can decide the satisfiability of ground formulas modulo a combination of
built-in theories. Adding a built-in theory to a given SMT solver is a complex and time
consuming task that requires internal knowledge of the solver. However, many theories
(arrays, reachability), can be easily expressed using first-order formulas.
Unfortunately, since universal quantifiers are not handled in a complete way by
SMT solvers, these axiomatics cannot be used as decision procedures.
In this paper, we show how to extend a generic SMT solver to accept a custom theory
description and behave as a decision procedure for that theory, provided that the described
theory is complete and terminating in a precise sense. The description language consists
of first-order axioms with triggers, an instantiation mechanism that is found in many SMT
solvers. This mechanism, which usually lacks a clear semantics in existing languages and
tools, is rigorously defined here; this definition can be used to prove completeness and
termination of the theory. We demonstrate using the theory of arrays, how such proofs
can be achieved in our formalism.
}
}

@techreport{dross12rr,
title = {Reasoning with Triggers},
author = {Dross, Claire and Conchon, Sylvain and Kanig, Johannes and Paskevich, Andrei},
pages = {29},
type = {Research Report},
institution = {INRIA},
number = {RR-7986},
year = 2012,
month = jun,
index = {967},
pdf = {papers/rr7986-12.pdf},
url = {http://hal.inria.fr/hal-00703207},
abstract = {
SMT solvers can decide the satisfiability of ground
formulas modulo a combination of built-in
theories. Adding a built-in theory to a given SMT
solver is a complex and time consuming task that
requires internal knowledge of the solver. However,
many theories can be easily expressed using
first-order formulas. Unfortunately, since universal
quantifiers are not handled in a complete way by SMT
solvers, these axiomatics cannot be used as decision
procedures. In this paper, we show how to extend a
generic SMT solver to accept a custom theory
description and behave as a decision procedure for
that theory, provided that the described theory is
complete and terminating in a precise sense. The
description language consists of first-order axioms
with triggers, an instantiation mechanism that is
found in many SMT solvers. This mechanism, which
usually lacks a clear semantics in existing
languages and tools, is rigorously defined here;
this definition can be used to prove completeness
and termination of the theory. We demonstrate on two
examples, how such proofs can be achieved in our
formalism.}
}

@inproceedings{filliatre12compare,
author = {Jean-Christophe Filliâtre and Andrei Paskevich and Aaron Stump},
title = {The 2nd Verified Software Competition: Experience Report},
booktitle = {COMPARE 2012, 1st International Workshop on Comparative Empirical Evaluation of Reasoning Systems},
month = jun,
year = 2012,
editor = {Vladimir Klebanov and Bernhard Beckert and Armin Biere and Geoff Sutcliffe},
series = {CEUR Workshop Proceedings},
volume = {873},
pages = {36--49},
index = {968},
pdf = {papers/compare-12.pdf},
abstract = {
We report on the second verified software competition. It was
organized by the three authors on a 48 hours period on November
8--10, 2011. This paper describes the competition, presents the
five problems that were proposed to the participants, and gives
an overview of the solutions sent by the 29 teams that entered
the competition.}
}

@inproceedings{BP2011frocos,
  author    = {François Bobot and Andrei Paskevich},
  title     = {Expressing Polymorphic Types in a Many-Sorted Language},
  editor    = {Tinelli, Cesare and Sofronie-Stokkermans, Viorica},
  booktitle = {FroCoS 2011, 8th International Symposium on Frontiers of Combining Systems},
  series    = {Lecture Notes in Computer Science},
  volume    = {6989},
  pages     = {87--102},
  publisher = {Springer},
  month     = oct,
  year      = {2011},
  index     = {969},
  pdf       = {papers/frocos-11.pdf},
  abstract  = {
In this paper, we study translation from a first-order logic with polymorphic
types à la ML (of which we give a formal description) to a many-sorted or
one-sorted logic as accepted by mainstream automated theorem provers.
We consider a three-stage scheme where the last stage eliminates polymorphic
types while adding the necessary "annotations" to preserve soundness, and the
first two stages serve to protect certain terms so that they can keep their
original unannotated form. This protection allows us to make use of provers'
built-in theories and operations.
We present two existing translation procedures as sound and complete instances
of this generic scheme. Our formulation generalizes over the previous ones by
allowing us to protect terms of arbitrary monomorphic types. In particular,
we can benefit from the built-in theory of arrays in SMT solvers such as
Z3, CVC3, and Yices.
The proposed methods are implemented in the Why3 tool and we compare their
performance in combination with several automated provers.
}
}

@misc{BP2011report,
  author   = {François Bobot and Andrei Paskevich},
  title    = {Expressing Polymorphic Types in a Many-Sorted Language},
  note     = {Extended report},
  url      = {http://hal.inria.fr/inria-00591414/en/},
  month    = jul,
  year     = 2011,
  index    = {970},
  pdf      = {papers/polyfol-11.pdf},
  abstract = {
In this paper, we study translation from a first-order logic with polymorphic
types à la ML (of which we give a formal description) to a many-sorted or
one-sorted logic as accepted by mainstream automated theorem provers.
We consider a three-stage scheme where the last stage eliminates polymorphic
types while adding the necessary "annotations" to preserve soundness, and the
first two stages serve to protect certain terms so that they can keep their
original unannotated form. This protection allows us to make use of provers'
built-in theories and operations.
We present two existing translation procedures as sound and complete instances
of this generic scheme. Our formulation generalizes over the previous ones by
allowing us to protect terms of arbitrary monomorphic types. In particular,
we can benefit from the built-in theory of arrays in SMT solvers such as
Z3, CVC3, and Yices.
The proposed methods are implemented in the Why3 tool and we compare their
performance in combination with several automated provers.
}
}

@inproceedings{BFMP11boogie,
author = {François Bobot and Jean-Christophe Filliâtre and Claude Marché and Andrei Paskevich},
title = {{Why3}: Shepherd Your Herd of Provers},
booktitle = {Boogie 2011, First International Workshop on Intermediate Verification Languages},
year = 2011,
pages = {53--64},
month = aug,
index = {971},
pdf = {papers/boogie-11.pdf},
abstract = {
Why3 is the next generation of the Why software verification
platform. Why3 clearly separates the purely logical specification part
from generation of verification conditions. This paper
focuses on the former part. Why3 comes with a new enhanced language
of logical specification. It features a rich library of proof task
transformations that can be chained to produce a suitable input for
a large set of theorem provers, including SMT solvers, TPTP provers,
as well as interactive proof assistants.
}
}

@inproceedings{CCFPPU10PEPM,
author = {{\'E}velyne Contejean and Pierre Courtieu and Julien Forest
and Andrei Paskevich and Olivier Pons and Xavier Urbain},
title = {{A3PAT}, an approach for certified automated termination proofs},
booktitle = {PEPM'10, Proceedings of the 2010 ACM SIGPLAN Workshop on
Partial Evaluation and Program Manipulation},
publisher = {ACM},
month = jan,
year = {2010},
pages = {63--72},
doi = {10.1145/1706356.1706370},
index = {972},
pdf = {papers/pepm-10.pdf},
abstract = {
Software engineering, automated reasoning, rule-based programming
or specifications often use rewriting systems for which termination,
among other properties, may have to be ensured. This paper presents
the approach developed in Project A3PAT to discover and moreover
certify, with full automation, termination proofs for term rewriting
systems.

It consists of two developments: the Coccinelle library formalises
numerous rewriting techniques and termination criteria for the Coq
proof assistant; the CiME3 rewriting tool translates termination
proofs (discovered by itself or other tools) into traces that are
certified by Coq assisted by Coccinelle.

The abstraction level of our formalisation allowed us to weaken
premises of some theorems known in the literature, thus yielding
new termination criteria, such as an extension of the powerful
subterm criterion (for which we propose the first full Coq
formalisation). Techniques employed in CiME3 also improve on
previous works on formalisation and analysis of dependency graphs.
}
}

@inproceedings{VLPA08MKM,
author = {Konstantin Verchinine and Alexander Lyaletski
and Andrei Paskevich and Anatoly Anisimov},
title = {On correctness of mathematical texts from
a logical and practical point of view},
booktitle = {Intelligent Computer Mathematics, AISC/Calculemus/MKM 2008},
editor = {Serge Autexier and John Campbell and Julio Rubio
and Volker Sorge and Masakazu Suzuki and Freek Wiedijk},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
month = jul,
year = 2008,
volume = 5144,
pages = {583--598},
index = {973},
pdf = {papers/mkm-08.pdf},
abstract = {
Formalizing mathematical argument is a fascinating activity
in itself and (we hope!) also bears important practical
applications. While traditional proof theory investigates
deducibility of an individual statement from a collection
of premises, a mathematical proof, with its structure and
continuity, can hardly be presented as a single sequent or
a set of logical formulas. What is called ``mathematical text'',
as used in mathematical practice through the ages, seems to be
more appropriate. However, no commonly adopted formal notion
of mathematical text has emerged so far.

In this paper, we propose a formalism which aims to reflect
natural (human) style and structure of mathematical argument,
yet to be appropriate for automated processing: principally,
verification of its correctness (we consciously use the word
rather than ``soundness'' or ``validity'').

We consider mathematical texts that are formalized in
the ForTheL language (brief description of which is also given)
and we formulate a point of view on what a correct mathematical
text might be. Logical notion of correctness is formalized with
the help of a calculus. Practically, these ideas, methods and
algorithms are implemented in a proof assistant called SAD.
We give a short description of SAD and a series of examples
showing what can be done with it.
}
}

@article{P08JAR,
author = {Andrei Paskevich},
title = {Connection Tableaux with Lazy Paramodulation},
journal = {Journal of Automated Reasoning},
publisher = {Springer},
year = 2008,
volume = 40,
number = {2--3},
pages = {179--194},
index = {974},
pdf = {papers/jar-08.pdf},
abstract = {
It is well known that the connection refinement of clause
tableaux with paramodulation is incomplete (even with weak
connections). In this paper, we present a new connection
tableau calculus for logic with equality. This calculus
is based on a lazy form of paramodulation where parts of
the unification step become auxiliary subgoals in a tableau
and may be subjected to subsequent paramodulations.
Our calculus uses ordering constraints and a certain form
of the basicness restriction.
}
}

@phdthesis{P07THES,
author = {Andrei Paskevich},
title = {Méthodes de formalisation des connaissances
et des raisonnements mathématiques:
aspects appliqués et théoriques},
school = {Université Paris 12},
year = 2007,
pages = {128},
note = {In French},
index = {975},
pdf = {papers/thesis-07.fr.pdf},
abstract = {
We study the means of presentation of mathematical knowledge
and reasoning schemes. Our research aims at an automated system
for verification of formalized mathematical texts.

In this system, a text to verify is written in a formal language
which is close to the natural language and style of mathematical
publications. Our intention is to exploit the hint which are
given to us by the «human» form of the problem: definitions,
proof schemes, nouns denoting classes of objects, etc.
We describe such a language, called ForTheL.

Verification consists in showing that the text is «sensible»
and «grounded», that functions and relations are applied within
the domain, according to the definitions, and assertions follow
from their respective premises. A formal definition of a correct
text relies on a sound sequent calculus and on the notion of local
validity (local with respect to some occurrence inside a formula).

Proof search is carried out on two levels. The lower level is
an automated theorem prover based on a combinatorial procedure.
We introduce a variant of connection tableaux which is sound and
complete in the first-order logic with equality.

The higher level is a «reasoner» which employs natural proving
techniques in order to filter, simplify, decompose a proof task
before passing it to the prover. The algorithms of the reasoner
are based on transformations that preserve the locally valid
propositions.

The proposed methods are implemented in the proof assistant SAD.
}
}

@inproceedings{PVLA07MKM,
author = {Andrei Paskevich and Konstantin Verchinine
and Alexander Lyaletski and Anatoly Anisimov},
title = {Reasoning inside a formula and ontological correctness
of a formal mathematical text},
booktitle = {Calculemus/MKM 2007 --- Work in Progress},
editor = {Manuel Kauers and Manfred Kerber and Robert Miner
and Wolfgang Windsteiger},
series = {RISC-Linz Report Series, University of Linz, Austria},
month = jun,
year = 2007,
number = {07-06},
pages = {77--91},
index = {976},
pdf = {papers/mkm-07.pdf},
abstract = {
Dealing with a formal mathematical text (which we regard
as a structured collection of hypotheses and conclusions),
we often want to perform various analysis and transformation
tasks on the initial formulas, without preliminary normalization.
One particular example is checking for ``ontological correctness'',
namely, that every occurrence of a non-logical symbol stems
from some definition of that symbol in the foregoing text.
Generally, we wish to test whether some known fact (axiom,
definition, lemma) is ``applicable'' at a given position
inside a statement, and to actually apply it (when needed)
in a logically sound way.

In this paper, we introduce the notion of a locally valid
statement, a statement that can be considered true at
a given position inside a first-order formula. We justify
the reasoning about ``innards'' of a formula; in particular,
we show that a locally valid equivalence is a sufficient
condition for an equivalent transformation of a subformula.
Using the notion of local validity, we give a formal definition
of ontological correctness for a text written in a special
formal language called ForTheL.
}
}

@inproceedings{VLP07CADE,
author = {Konstantin Verchinine and Alexander Lyaletski and Andrei Paskevich},
title = {System for Automated Deduction ({SAD}): a tool
for proof verification},
booktitle = {Automated Deduction, 21st International Conference, CADE-21},
editor = {Frank Pfenning},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
month = jul,
year = 2007,
volume = 4603,
pages = {398--403},
index = {977},
abstract = {
In this paper, a proof assistant, called SAD, is presented.
SAD deals with mathematical texts that are formalized in
the ForTheL language (brief description of which is also
given) and checks their correctness. We give a short
description of SAD and a series of examples that show
what can be done with it. Note that abstract notion of
correctness on which the implementation is based, can be
formalized with the help of a calculus (not presented here).
}
}

@inproceedings{P06IJCAR,
author = {Andrei Paskevich},
title = {Connection Tableaux with Lazy Paramodulation},
booktitle = {Automated Reasoning, 3rd International
Joint Conference, IJCAR 2006},
editor = {Uli Furbach and Natarajan Shankar},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
month = aug,
year = 2006,
volume = 4130,
pages = {112--124},
index = {978},
pdf = {papers/ijcar-06.pdf},
abstract = {
It is well-known that the connection refinement of clause
tableaux with paramodulation is incomplete (even with weak
connections). In this paper, we present a new connection
tableau calculus for logic with equality. This calculus
is based on a lazy form of paramodulation where parts of
the unification step become auxiliary subgoals in a tableau
and may be subjected to subsequent paramodulations.
Our calculus uses ordering constraints and a certain form
of the basicness restriction.
}
}

@article{LPV06JAL,
  author    = {Alexander Lyaletski and Andrey Paskevich and Konstantin Verchinine},
  title     = {{SAD} as a mathematical assistant --- how
should we go from here to there?},
  journal   = {Journal of Applied Logic},
  booktitle = {Towards Computer Aided Mathematics},
  editor    = {Christoph Benzm\"uller},
  publisher = {Elsevier},
  year      = 2006,
  volume    = 4,
  number    = 4,
  pages     = {560--591},
  index     = {980},
  ps        = {papers/jal-06.ps.gz},
  abstract  = {
The System for Automated Deduction (SAD) is developed
in the framework of the Evidence Algorithm research project
and is intended for automated processing of mathematical texts.
The SAD system works on three levels of reasoning:
(a) the level of text presentation where proofs are written
in a formal natural-like language for subsequent verification;
(b) the level of foreground reasoning where a particular
theorem proving problem is simplified and decomposed;
(c) the level of background deduction where exhaustive
combinatorial inference search in classical first-order
logic is applied to prove end subgoals. We present an overview
of SAD describing the ideas behind the project, the system's
design, and the process of problem formalization in the fashion
of SAD. We show that the choice of classical first-order logic
as the background logic of SAD is not too restrictive.
For example, we can handle binders like $\mathit{\Sigma}$
or $\mathit{lim}$ without resort to second order or to
a full-powered set theory. We illustrate our approach with
a series of examples, in particular, with the classical
problem $\sqrt{2} \not\in \mathbf{Q}$.
}
}

@phdthesis{P05THES,
author = {Andrei Paskevich},
title = {Methods of formalization of mathematical knowledge
and reasoning: theoretical and practical aspects},
school = {Kiev National Taras Shevchenko University},
year = 2005,
note = {In Ukrainian},
pages = {140},
index = {981},
pdf = {papers/thesis-05.ua.pdf}
}

@inproceedings{LPV04MKM,
author = {Alexander Lyaletski and Andrey Paskevich and Konstantin Verchinine},
title = {Theorem Proving and Proof Verification in the System {SAD}},
booktitle = {Mathematical Knowledge Management,
3rd International Conference, MKM 2004},
editor = {Andrea Asperti and Grzegorz Bancerek and Andrzej Trybulec},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
month = sep,
year = 2004,
volume = 3119,
pages = {236--250},
index = {982},
ps = {papers/mkm-04.ps.gz},
abstract = {
In this paper, the current state of the System for Automated
Deduction, SAD, is described briefly. The system may be
considered as the modern vision of the Evidence Algorithm
V.~Glushkov proposed to make investigation simultaneously
into formalized languages for presenting mathematical texts
in the form most appropriate for a user, formalization and
evolutionary development of computer-made proof step,
information environment having an influence on the evidence
of a proof step, and man-assisted search for a proof.
In this connection, SAD supports a number of formal languages
for representing and processing mathematical knowledge along
with the formal language ForTheL as their top representative,
uses a sequent formalism developed for constructing an efficient
technique of proof search within the signature of an initial
theory, and gives a new way to organize the information
environment for sharing mathematical knowledge among various
computer services. The system SAD can be used to solve large
variety of theorem proving problems including: establishing
of the deducibility of sequents in first-order classical logic,
theorem proving in ForTheL-environment, verifying correctness
of self-contained ForTheL-texts, solving problems taken from
the online library TPTP. A number of examples is given for
illustrating the mathematical knowledge processing implemented
in SAD.
}
}

@inproceedings{LDPV04ISTA,
author = {Alexander V. Lyaletski and Anatoly E. Doroshenko
and Andrey Paskevich and Konstantin Verchinine},
title = {Evidential Paradigm and Intelligent
Mathematical Text Processing},
booktitle = {Information Systems Technology and its Applications,
3rd International Conference, ISTA 2004},
editor = {Anatoly E. Doroshenko and Terry A. Halpin
and Stephen W. Liddle and Heinrich C. Mayr},
series = {Lecture Notes in Informatics},
publisher = {GI},
address = {Salt Lake City UT, USA},
month = jul,
year = 2004,
volume = 48,
pages = {205--211},
index = {983},
pdf = {papers/ista-04.pdf},
abstract = {
This paper presents the evidential paradigm
of computer-supported mathematical assistance in ``doing''
mathematics and in reasoning activity. At present, the evidential
paradigm is implemented in the form of System for Automated
Deduction (SAD). The system is based on the methods of automated
theorem proving and is intended for intelligent mathematical text
processing. It proves mathematical theorems, verifies validity of
self-contained mathematical texts and can be used for inference
search in first-order sequent-based logic as well. For human-like
representation of mathematical knowledge, SAD exploits an original
formal language close to natural languages of scientific publications.
Since the problem of automated text verification is of great
importance for industrial applications (checking specifications,
proving safety properties of network protocols, etc), the paper
illustrates some principles and peculiarities of the evidential
paradigm by means of exemplifying the verification of a part of
a non-trivial mathematical text.
}
}

@inproceedings{LPV03ITCLS,
author = {Alexander Lyaletski and Andrey Paskevich
and Konstantin Verchinine},
title = {On Verification Tools Implemented in the {System}
for {Automated} {Deduction}},
booktitle = {Implementation Technology for Computational
Logic Systems, 2nd CoLogNet Workshop, ITCLS 2003},
month = sep,
year = 2003,
pages = {3--14},
index = {984},
ps = {papers/itcls-03.ps.gz},
abstract = {
Among the tasks of the Evidence Algorithm programme,
the verification of formalized mathematical texts is
of great significance. Our investigations in this domain
were brought to practice in the last version of the System
for Automated Deduction (SAD). The system exploits a formal
language to represent mathematical knowledge in a ``natural''
form and a sequential first-order formalism to prove statements
in the frame of a self-contained mathematical text. In the paper,
we give an overview of the architecture of SAD and its verification
tools. In order to demonstrate the work of SAD, a sample verification
session is examined.
}
}

@inproceedings{ADLPPV02WIL,
author = {Zainutdin Aselderov and Konstantin Verchinine
and Anatoli Degtyarev and Alexander Lyaletski
and Andrey Paskevich and Alexandre Pavlov},
title = {Linguistic Tools and Deductive Technique of the
{System} for {Automated} {Deduction}},
booktitle = {Implementation of Logics, 3rd International
Workshop, WIL 2002},
month = oct,
year = 2002,
pages = {21--24},
index = {991},
ps = {papers/wil-02.ps.gz},
abstract = {
This paper is devoted to a brief description of some
peculiarities and of the first software implementation
of the System for Automated Deduction, SAD. Architecture
of SAD corresponds well to a modern vision of the Evidence
Algorithm that was conceived by V. Glushkov as a programme
for constructing open systems for automated theorem proving
that are intended for computer-aided ``doing'' mathematics:
i.e. for extracting and accumulating mathematical computer
knowledge and for using it in a regular and efficient manner
to prove a given statement that is always considered as a part
of a self-contained mathematical text. In addition, the main
principles of SAD reflect the current understanding of
the problem of man-assisted processing of mathematical
computer knowledge.
}
}

@inproceedings{DLPV02IIS,
author = {Alexander Lyaletski and Konstantin Verchinine
and Anatoli Degtyarev and Andrey Paskevich},
title = {{System} for {Automated} {Deduction} ({SAD}):
Linguistic and Deductive Peculiarities},
booktitle = {Intelligent Information Systems, 11th International
Symposium, IIS 2002},
editor = {M. A. Klopotek and S. T. Wierzchon and M. Michalewicz},
series = {Advances in Soft Computing},
publisher = {Physica-Verlag},
month = jun,
year = 2002,
pages = {413--422},
index = {992},
ps = {papers/iis-02.ps.gz},
abstract = {
In this paper a state-of-the-art of a system for automated
deduction (SAD) is described. The architecture of SAD
corresponds well to a modern vision of the Evidence Algorithm
programme initiated by Academician V.~Glushkov. The system
is intended for accumulating mathematical knowledge and using
it in a regular and efficient manner for processing
a self-contained mathematical text in order to prove
a given statement that always is considered as a part
of the text. Two peculiarities are inherent in SAD:
(a) mathematical texts under consideration are formalized
with the help of a specific formal language, which is close
to a natural language used in mathematical publications;
(b) proof search is based on a specific sequent-type calculus,
which gives a possibility to formalize ``natural reasoning style''.
The language may be used as a tool to write and verify mathematical
papers, theorems, and formal specifications, to perform model
checking, and so on. The calculus is oriented to constructing
some natural proof search methods such as definition and
auxiliary proposition applications.
}
}

@inproceedings{LP01STRAT,
  author    = {Alexander Lyaletski and Andrey Paskevich},
  title     = {Goal-driven inference search in classical propositional logic},
  booktitle = {Proc. International Workshop {STRATEGIES'2001}},
  month     = jun,
  year      = {2001},
  pages     = {65--74},
  index     = {995},
  ps        = {papers/strat-01.ps.gz},
  abstract  = {
Two goal-driven strategies for inference search in propositional
logic are presented in the form of special sequent calculi.
Some results on their soundness and completeness are given,
and comparison of these calculi is made. It is shown that
no one of them is preferable to another in the sense of
constructing minimal inferences.
}
}

@article{PV00ITA,
  author  = {Konstantin Vershinin and Andrey Paskevich},
  title   = {{ForTheL} --- the language of formal theories},
  journal = {International Journal of Information
             Theories and Applications},
  year    = {2000},
  volume  = {7},
  number  = {3},
  pages   = {120--126},
  index   = {998},
  ps      = {papers/ita-00.ps.gz},
  abstract = {
The paper describes the language ForTheL (FORmal THEory
Language) aimed at representation of formal theories ---
in strict mathematical sense of the term. Its syntax is
strictly formalised in the terms of BNF and its semantics
can be precisely defined with the help of syntactical
transformation that maps ForTheL-phrases to the formulae
of first-order logic with equality. At the same time ForTheL
is intended to be close to the natural language of mathematical
texts, it formalises and uses expressive means of the human
language.
}
}


Last update: 5 Sep 2015