(* Copyright (c) 2008-2010, Adam Chlipala
 * 
 * This work is licensed under a
 * Creative Commons Attribution-Noncommercial-No Derivative Works 3.0
 * Unported License.
 * The license text is available at:
 *   http://creativecommons.org/licenses/by-nc-nd/3.0/
 *)

(* begin hide *)
Require Import List.

Require Import Tactics MoreSpecif.

Set Implicit Arguments.
(* end hide *)


(** %\chapter{Proof by Reflection}% *)

(** The last chapter highlighted a very heuristic approach to proving.  In this chapter, we will study an alternative technique, %\textit{%#<i>#proof by reflection#</i>#%}%.  We will write, in Gallina, decision procedures with proofs of correctness, and we will appeal to these procedures in writing very short proofs.  Such a proof is checked by running the decision procedure.  The term %\textit{%#<i>#reflection#</i>#%}% applies because we will need to translate Gallina propositions into values of inductive types representing syntax, so that Gallina programs may analyze them. *)


(** * Proving Evenness *)

(** Proving that particular natural number constants are even is certainly something we would rather have happen automatically.  The Ltac-programming techniques that we learned in the last chapter make it easy to implement such a procedure. *)

Inductive isEven : nat -> Prop :=
| Even_O : isEven O
| Even_SS : forall n, isEven n -> isEven (S (S n)).

(* begin thide *)
Ltac prove_even := repeat constructor.
(* end thide *)

Theorem even_256 : isEven 256.
  prove_even.
Qed.

Print even_256.
(** %\vspace{-.15in}% [[
even_256 =
Even_SS
  (Even_SS
     (Even_SS
        (Even_SS
 
]]

%\noindent%...and so on.  This procedure always works (at least on machines with infinite resources), but it has a serious drawback, which we see when we print the proof it generates that 256 is even.  The final proof term has length super-linear in the input value.  (Coq's implicit arguments mechanism is hiding the values given for parameter [n] of [Even_SS], which is why the proof term only appears linear here.)  This seems like a shame, since we could write a trivial and trustworthy program to verify evenness of constants.  The proof checker could simply call our program where needed.

It is also unfortunate not to have static typing guarantees that our tactic always behaves appropriately.  Other invocations of similar tactics might fail with dynamic type errors, and we would not know about the bugs behind these errors until we happened to attempt to prove complex enough goals.

The techniques of proof by reflection address both complaints.  We will be able to write proofs like this with constant size overhead beyond the size of the input, and we will do it with verified decision procedures written in Gallina.

For this example, we begin by using a type from the [MoreSpecif] module (included in the book source) to write a certified evenness checker. *)

Print partial.
(** %\vspace{-.15in}% [[
Inductive partial (P : Prop) : Set :=  Proved : P -> [P] | Uncertain : [P]
 
]]

A [partial P] value is an optional proof of [P].  The notation [[P]] stands for [partial P]. *)

Local Open Scope partial_scope.

(** We bring into scope some notations for the [partial] type.  These overlap with some of the notations we have seen previously for specification types, so they were placed in a separate scope that needs separate opening. *)

(* begin thide *)
Definition check_even (n : nat) : [isEven n].
  Hint Constructors isEven.

  refine (fix F (n : nat) : [isEven n] :=
    match n with
      | 0 => Yes
      | 1 => No
      | S (S n') => Reduce (F n')
    end); auto.
Defined.
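
(** As a quick sanity check, we can evaluate the new checker on small constants.  These queries are only a sketch of the expected behavior; the exact printed forms depend on the [partial] notations from [MoreSpecif].

[[
Eval compute in check_even 4.

Eval compute in check_even 3.
 
]]

The first command should reduce to a value built with the [Proved] constructor, packaging a derivation of [isEven 4], while the second should reduce to [Uncertain], since no such derivation exists for 3. *)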
adamc@142
|
81
|
adamc@142
|
82 (** We can use dependent pattern-matching to write a function that performs a surprising feat. When given a [partial P], this function [partialOut] returns a proof of [P] if the [partial] value contains a proof, and it returns a (useless) proof of [True] otherwise. From the standpoint of ML and Haskell programming, it seems impossible to write such a type, but it is trivial with a [return] annotation. *)
|
adamc@142
|
83
|
adamc@142
|
84 Definition partialOut (P : Prop) (x : [P]) :=
|
adamc@142
|
85 match x return (match x with
|
adamc@142
|
86 | Proved _ => P
|
adamc@142
|
87 | Uncertain => True
|
adamc@142
|
88 end) with
|
adamc@142
|
89 | Proved pf => pf
|
adamc@142
|
90 | Uncertain => I
|
adamc@142
|
91 end.
|
adamc@142
|
92
|
adam@289
|
93 (** It may seem strange to define a function like this. However, it turns out to be very useful in writing a reflective version of our earlier [prove_even] tactic: *)
|
adamc@142
|
94
|
adamc@142
|
95 Ltac prove_even_reflective :=
|
adamc@142
|
96 match goal with
|
adamc@142
|
97 | [ |- isEven ?N] => exact (partialOut (check_even N))
|
adamc@142
|
98 end.
|
adamc@148
|
99 (* end thide *)
|
adamc@142
|
100
|
adam@289
|
101 (** We identify which natural number we are considering, and we %``%#"#prove#"#%''% its evenness by pulling the proof out of the appropriate [check_even] call. *)
|
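
(** The [exact] step succeeds by conversion: for an even constant, [check_even] reduces to a [Proved] value, so the dependent return type of [partialOut] reduces to exactly the evenness fact we want.  As an illustrative sketch (with an arbitrary fresh name), Coq should accept a definition like this directly:

[[
(* illustrative only; [prove_even_reflective] below produces the same term *)
Definition even_4 : isEven 4 := partialOut (check_even 4).
 
]]
*)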

Theorem even_256' : isEven 256.
  prove_even_reflective.
Qed.

Print even_256'.
(** %\vspace{-.15in}% [[
even_256' = partialOut (check_even 256)
     : isEven 256
 
]]

We can see a constant wrapper around the object of the proof.  For any even number, this form of proof will suffice.  The size of the proof term is now linear in the number being checked, containing two repetitions of the unary form of that number, one of which is hidden above within the implicit argument to [partialOut].

What happens if we try the tactic with an odd number? *)

Theorem even_255 : isEven 255.
  (** [[
  prove_even_reflective.

User error: No matching clauses for match goal
 
]]

  Thankfully, the tactic fails.  To see more precisely what goes wrong, we can manually run the body of the [match].

  [[
  exact (partialOut (check_even 255)).

Error: The term "partialOut (check_even 255)" has type
 "match check_even 255 with
  | Yes => isEven 255
  | No => True
  end" while it is expected to have type "isEven 255"
 
]]

  As usual, the type-checker performs no reductions to simplify error messages.  If we reduced the first term ourselves, we would see that [check_even 255] reduces to a [No], so that the first term is equivalent to [True], which certainly does not unify with [isEven 255]. *)

Abort.


(** * Reflecting the Syntax of a Trivial Tautology Language *)

(** We might also like to have reflective proofs of trivial tautologies like this one: *)

Theorem true_galore : (True /\ True) -> (True \/ (True /\ (True -> True))).
  tauto.
Qed.

Print true_galore.
(** %\vspace{-.15in}% [[
true_galore =
fun H : True /\ True =>
and_ind (fun _ _ : True => or_introl (True /\ (True -> True)) I) H
     : True /\ True -> True \/ True /\ (True -> True)
 
]]

As we might expect, the proof that [tauto] builds contains explicit applications of natural deduction rules.  For large formulas, this can add a linear amount of proof size overhead, beyond the size of the input.

To write a reflective procedure for this class of goals, we will need to get into the actual %``%#"#reflection#"#%''% part of %``%#"#proof by reflection.#"#%''%  It is impossible to case-analyze a [Prop] in any way in Gallina.  We must %\textit{%#<i>#reflect#</i>#%}% [Prop] into some type that we %\textit{%#<i>#can#</i>#%}% analyze.  This inductive type is a good candidate: *)

(* begin thide *)
Inductive taut : Set :=
| TautTrue : taut
| TautAnd : taut -> taut -> taut
| TautOr : taut -> taut -> taut
| TautImp : taut -> taut -> taut.

(** We write a recursive function to %``%#"#unreflect#"#%''% this syntax back to [Prop]. *)

Fixpoint tautDenote (t : taut) : Prop :=
  match t with
    | TautTrue => True
    | TautAnd t1 t2 => tautDenote t1 /\ tautDenote t2
    | TautOr t1 t2 => tautDenote t1 \/ tautDenote t2
    | TautImp t1 t2 => tautDenote t1 -> tautDenote t2
  end.

(** It is easy to prove that every formula in the range of [tautDenote] is true. *)

Theorem tautTrue : forall t, tautDenote t.
  induction t; crush.
Qed.

(** To use [tautTrue] to prove particular formulas, we need to implement the syntax reflection process.  A recursive Ltac function does the job. *)

Ltac tautReflect P :=
  match P with
    | True => TautTrue
    | ?P1 /\ ?P2 =>
      let t1 := tautReflect P1 in
      let t2 := tautReflect P2 in
        constr:(TautAnd t1 t2)
    | ?P1 \/ ?P2 =>
      let t1 := tautReflect P1 in
      let t2 := tautReflect P2 in
        constr:(TautOr t1 t2)
    | ?P1 -> ?P2 =>
      let t1 := tautReflect P1 in
      let t2 := tautReflect P2 in
        constr:(TautImp t1 t2)
  end.
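
(** We can check what [tautReflect] produces on a small formula.  A sketch of an interactive experiment:

[[
Goal True.
  let t := tautReflect (True /\ (True -> True)) in pose t.
Abort.
 
]]

The [pose] should add a local definition equal to [TautAnd TautTrue (TautImp TautTrue TautTrue)] to the context. *)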

(** With [tautReflect] available, it is easy to finish our reflective tactic.  We look at the goal formula, reflect it, and apply [tautTrue] to the reflected formula. *)

Ltac obvious :=
  match goal with
    | [ |- ?P ] =>
      let t := tautReflect P in
        exact (tautTrue t)
  end.

(** We can verify that [obvious] solves our original example, with a proof term that does not mention details of the proof. *)
(* end thide *)

Theorem true_galore' : (True /\ True) -> (True \/ (True /\ (True -> True))).
  obvious.
Qed.

Print true_galore'.

(** %\vspace{-.15in}% [[
true_galore' =
tautTrue
  (TautImp (TautAnd TautTrue TautTrue)
     (TautOr TautTrue (TautAnd TautTrue (TautImp TautTrue TautTrue))))
     : True /\ True -> True \/ True /\ (True -> True)
 
]]

It is worth considering how the reflective tactic improves on a pure-Ltac implementation.  The formula reflection process is just as ad-hoc as before, so we gain little there.  In general, proofs will be more complicated than formula translation, and the %``%#"#generic proof rule#"#%''% that we apply here %\textit{%#<i>#is#</i>#%}% on much better formal footing than a recursive Ltac function.  The dependent type of the proof guarantees that it %``%#"#works#"#%''% on any input formula.  This is all in addition to the proof-size improvement that we have already seen. *)
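
(** The same tactic disposes of other formulas in this fragment just as easily.  For instance, this additional example (an illustrative sketch with a fresh name) should go through with no changes to [obvious]:

[[
Theorem true_galore'' : True -> (True /\ True) /\ (True \/ True).
  obvious.
Qed.
 
]]
*)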


(** * A Monoid Expression Simplifier *)

(** Proof by reflection does not require encoding of all of the syntax in a goal.  We can insert %``%#"#variables#"#%''% in our syntax types to allow injection of arbitrary pieces, even if we cannot apply specialized reasoning to them.  In this section, we explore that possibility by writing a tactic for normalizing monoid equations. *)

Section monoid.
  Variable A : Set.
  Variable e : A.
  Variable f : A -> A -> A.

  Infix "+" := f.

  Hypothesis assoc : forall a b c, (a + b) + c = a + (b + c).
  Hypothesis identl : forall a, e + a = a.
  Hypothesis identr : forall a, a + e = a.

  (** We add variables and hypotheses characterizing an arbitrary instance of the algebraic structure of monoids.  We have an associative binary operator and an identity element for it.

     It is easy to define an expression tree type for monoid expressions.  A [Var] constructor is a %``%#"#catch-all#"#%''% case for subexpressions that we cannot model.  These subexpressions could be actual Gallina variables, or they could just use functions that our tactic is unable to understand. *)

  (* begin thide *)
  Inductive mexp : Set :=
  | Ident : mexp
  | Var : A -> mexp
  | Op : mexp -> mexp -> mexp.

  (** Next, we write an %``%#"#un-reflect#"#%''% function. *)

  Fixpoint mdenote (me : mexp) : A :=
    match me with
      | Ident => e
      | Var v => v
      | Op me1 me2 => mdenote me1 + mdenote me2
    end.

  (** We will normalize expressions by flattening them into lists, via associativity, so it is helpful to have a denotation function for lists of monoid values. *)

  Fixpoint mldenote (ls : list A) : A :=
    match ls with
      | nil => e
      | x :: ls' => x + mldenote ls'
    end.

  (** The flattening function itself is easy to implement. *)

  Fixpoint flatten (me : mexp) : list A :=
    match me with
      | Ident => nil
      | Var x => x :: nil
      | Op me1 me2 => flatten me1 ++ flatten me2
    end.
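
  (** For example, with the section variables above, [flatten (Op (Var a) (Op Ident (Var b)))] should compute to the two-element list [a :: b :: nil], whose denotation under [mldenote] is [a + (b + e)].  A sketch of such a check:

     [[
  Eval simpl in fun a b : A => flatten (Op (Var a) (Op Ident (Var b))).
 
     ]]
     *)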

  (** [flatten] has a straightforward correctness proof in terms of our [denote] functions. *)

  Lemma flatten_correct' : forall ml2 ml1,
    mldenote ml1 + mldenote ml2 = mldenote (ml1 ++ ml2).
    induction ml1; crush.
  Qed.

  Theorem flatten_correct : forall me, mdenote me = mldenote (flatten me).
    Hint Resolve flatten_correct'.

    induction me; crush.
  Qed.

  (** Now it is easy to prove a theorem that will be the main tool behind our simplification tactic. *)

  Theorem monoid_reflect : forall me1 me2,
    mldenote (flatten me1) = mldenote (flatten me2)
    -> mdenote me1 = mdenote me2.
    intros; repeat rewrite flatten_correct; assumption.
  Qed.

  (** We implement reflection into the [mexp] type. *)

  Ltac reflect me :=
    match me with
      | e => Ident
      | ?me1 + ?me2 =>
        let r1 := reflect me1 in
        let r2 := reflect me2 in
          constr:(Op r1 r2)
      | _ => constr:(Var me)
    end.

  (** The final [monoid] tactic works on goals that equate two monoid terms.  We reflect each and change the goal to refer to the reflected versions, finishing off by applying [monoid_reflect] and simplifying uses of [mldenote]. *)

  Ltac monoid :=
    match goal with
      | [ |- ?me1 = ?me2 ] =>
        let r1 := reflect me1 in
        let r2 := reflect me2 in
          change (mdenote r1 = mdenote r2);
            apply monoid_reflect; simpl mldenote
    end.

  (** We can make short work of theorems like this one: *)

  (* end thide *)

  Theorem t1 : forall a b c d, a + b + c + d = a + (b + c) + d.
    intros; monoid.
    (** [[
  ============================
   a + (b + (c + (d + e))) = a + (b + (c + (d + e)))
 
]]

    [monoid] has canonicalized both sides of the equality, such that we can finish the proof by reflexivity. *)

    reflexivity.
  Qed.

  (** It is interesting to look at the form of the proof. *)

  Print t1.
  (** %\vspace{-.15in}% [[
t1 =
fun a b c d : A =>
monoid_reflect (Op (Op (Op (Var a) (Var b)) (Var c)) (Var d))
  (Op (Op (Var a) (Op (Var b) (Var c))) (Var d))
  (refl_equal (a + (b + (c + (d + e)))))
     : forall a b c d : A, a + b + c + d = a + (b + c) + d
 
]]

The proof term contains only restatements of the equality operands in reflected form, followed by a use of reflexivity on the shared canonical form. *)
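
  (** The tactic also handles occurrences of the identity [e], since [reflect] maps them to [Ident] and [flatten] drops them.  A sketch of an additional example that should go through the same way:

     [[
  (* an illustrative companion to [t1] *)
  Theorem t1' : forall a b, (a + e) + (e + b) = a + b.
    intros; monoid; reflexivity.
  Qed.
 
     ]]
     *)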

End monoid.

(** Extensions of this basic approach are used in the implementations of the [ring] and [field] tactics that come packaged with Coq. *)


(** * A Smarter Tautology Solver *)

(** Now we are ready to revisit our earlier tautology solver example.  We want to broaden the scope of the tactic to include formulas whose truth is not syntactically apparent.  We will want to allow injection of arbitrary formulas, like we allowed arbitrary monoid expressions in the last example.  Since we are working in a richer theory, it is important to be able to use equalities between different injected formulas.  For instance, we cannot prove [P -> P] by translating the formula into a value like [Imp (Var P) (Var P)], because a Gallina function has no way of comparing the two [P]s for equality.

To arrive at a nice implementation satisfying these criteria, we introduce the [quote] tactic and its associated library. *)

Require Import Quote.

(* begin thide *)
Inductive formula : Set :=
| Atomic : index -> formula
| Truth : formula
| Falsehood : formula
| And : formula -> formula -> formula
| Or : formula -> formula -> formula
| Imp : formula -> formula -> formula.

(** The type [index] comes from the [Quote] library and represents a countable variable type.  The rest of [formula]'s definition should be old hat by now.

The [quote] tactic will implement injection from [Prop] into [formula] for us, but it is not quite as smart as we might like.  In particular, it interprets implications incorrectly, so we will need to declare a wrapper definition for implication, as we did in the last chapter. *)

Definition imp (P1 P2 : Prop) := P1 -> P2.
Infix "-->" := imp (no associativity, at level 95).

(** Now we can define our denotation function. *)

Definition asgn := varmap Prop.

Fixpoint formulaDenote (atomics : asgn) (f : formula) : Prop :=
  match f with
    | Atomic v => varmap_find False v atomics
    | Truth => True
    | Falsehood => False
    | And f1 f2 => formulaDenote atomics f1 /\ formulaDenote atomics f2
    | Or f1 f2 => formulaDenote atomics f1 \/ formulaDenote atomics f2
    | Imp f1 f2 => formulaDenote atomics f1 --> formulaDenote atomics f2
  end.

(** The [varmap] type family implements maps from [index] values.  In this case, we define an assignment as a map from variables to [Prop]s.  [formulaDenote] works with an assignment, and we use the [varmap_find] function to consult the assignment in the [Atomic] case.  The first argument to [varmap_find] is a default value, in case the variable is not found. *)
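
(** For instance, with a singleton assignment mapping the root index to the proposition [1 = 1], denotation should unfold as expected.  A sketch:

[[
Eval simpl in formulaDenote (Node_vm (1 = 1) (Empty_vm Prop) (Empty_vm Prop))
  (Imp (Atomic End_idx) (Atomic End_idx)).
 
]]

The result should be the proposition [(1 = 1) --> (1 = 1)]. *)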

Section my_tauto.
  Variable atomics : asgn.

  Definition holds (v : index) := varmap_find False v atomics.

  (** We define some shorthand for a particular variable being true, and now we are ready to define some helpful functions based on the [ListSet] module of the standard library, which (unsurprisingly) presents a view of lists as sets. *)

  Require Import ListSet.

  Definition index_eq : forall x y : index, {x = y} + {x <> y}.
    decide equality.
  Defined.

  Definition add (s : set index) (v : index) := set_add index_eq v s.

  Definition In_dec : forall v (s : set index), {In v s} + {~ In v s}.
    Local Open Scope specif_scope.

    intro; refine (fix F (s : set index) : {In v s} + {~ In v s} :=
      match s with
        | nil => No
        | v' :: s' => index_eq v' v || F s'
      end); crush.
  Defined.

  (** We define what it means for all members of an index set to represent true propositions, and we prove some lemmas about this notion. *)

  Fixpoint allTrue (s : set index) : Prop :=
    match s with
      | nil => True
      | v :: s' => holds v /\ allTrue s'
    end.
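
  (** Unfolding [allTrue] on a concrete list produces an iterated conjunction.  For instance, this sketch:

     [[
  Eval simpl in fun v1 v2 : index => allTrue (v1 :: v2 :: nil).
 
     ]]

     should yield [fun v1 v2 : index => holds v1 /\ (holds v2 /\ True)]. *)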

  Theorem allTrue_add : forall v s,
    allTrue s
    -> holds v
    -> allTrue (add s v).
    induction s; crush;
      match goal with
        | [ |- context[if ?E then _ else _] ] => destruct E
      end; crush.
  Qed.

  Theorem allTrue_In : forall v s,
    allTrue s
    -> set_In v s
    -> varmap_find False v atomics.
    induction s; crush.
  Qed.

  Hint Resolve allTrue_add allTrue_In.

  Local Open Scope partial_scope.

  (** Now we can write a function [forward] which implements deconstruction of hypotheses.  It has a dependent type, in the style of Chapter 6, guaranteeing correctness.  The arguments to [forward] are a goal formula [f], a set [known] of atomic formulas that we may assume are true, a hypothesis formula [hyp], and a success continuation [cont] that we call when we have extended [known] to hold new truths implied by [hyp]. *)

  Definition forward (f : formula) (known : set index) (hyp : formula)
    (cont : forall known', [allTrue known' -> formulaDenote atomics f])
    : [allTrue known -> formulaDenote atomics hyp -> formulaDenote atomics f].
    refine (fix F (f : formula) (known : set index) (hyp : formula)
      (cont : forall known', [allTrue known' -> formulaDenote atomics f])
      : [allTrue known -> formulaDenote atomics hyp -> formulaDenote atomics f] :=
      match hyp with
        | Atomic v => Reduce (cont (add known v))
        | Truth => Reduce (cont known)
        | Falsehood => Yes
        | And h1 h2 =>
          Reduce (F (Imp h2 f) known h1 (fun known' =>
            Reduce (F f known' h2 cont)))
        | Or h1 h2 => F f known h1 cont && F f known h2 cont
        | Imp _ _ => Reduce (cont known)
      end); crush.
  Defined.

  (** A [backward] function implements analysis of the final goal.  It calls [forward] to handle implications. *)

  Definition backward (known : set index) (f : formula)
    : [allTrue known -> formulaDenote atomics f].
    refine (fix F (known : set index) (f : formula)
      : [allTrue known -> formulaDenote atomics f] :=
      match f with
        | Atomic v => Reduce (In_dec v known)
        | Truth => Yes
        | Falsehood => No
        | And f1 f2 => F known f1 && F known f2
        | Or f1 f2 => F known f1 || F known f2
        | Imp f1 f2 => forward f2 known f1 (fun known' => F known' f2)
      end); crush; eauto.
  Defined.

  (** A simple wrapper around [backward] gives us the usual type of a partial decision procedure. *)

  Definition my_tauto (f : formula) : [formulaDenote atomics f].
    intro; refine (Reduce (backward nil f)); crush.
  Defined.
End my_tauto.
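
(** Before wrapping the checker in a tactic, we can exercise it directly.  A sketch:

[[
Eval compute in my_tauto (Empty_vm Prop) (Imp Falsehood Truth).
 
]]

This should reduce to a value built with [Proved], while replacing the formula with, say, [Imp Truth Falsehood] should yield [Uncertain]. *)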

(** Our final tactic implementation is now fairly straightforward.  First, we [intro] all quantifiers that do not bind [Prop]s.  Then we call the [quote] tactic, which implements the reflection for us.  Finally, we are able to construct an exact proof via [partialOut] and the [my_tauto] Gallina function. *)

Ltac my_tauto :=
  repeat match goal with
           | [ |- forall x : ?P, _ ] =>
             match type of P with
               | Prop => fail 1
               | _ => intro
             end
         end;
  quote formulaDenote;
  match goal with
    | [ |- formulaDenote ?m ?f ] => exact (partialOut (my_tauto m f))
  end.
(* end thide *)

(** A few examples demonstrate how the tactic works. *)

Theorem mt1 : True.
  my_tauto.
Qed.

Print mt1.
(** %\vspace{-.15in}% [[
mt1 = partialOut (my_tauto (Empty_vm Prop) Truth)
     : True
 
]]

We see [my_tauto] applied with an empty [varmap], since every subformula is handled by [formulaDenote]. *)

Theorem mt2 : forall x y : nat, x = y --> x = y.
  my_tauto.
Qed.

Print mt2.
(** %\vspace{-.15in}% [[
mt2 =
fun x y : nat =>
partialOut
  (my_tauto (Node_vm (x = y) (Empty_vm Prop) (Empty_vm Prop))
     (Imp (Atomic End_idx) (Atomic End_idx)))
     : forall x y : nat, x = y --> x = y
 
]]

Crucially, both instances of [x = y] are represented with the same index, [End_idx].  The value of this index only needs to appear once in the [varmap], whose form reveals that [varmap]s are represented as binary trees, where [index] values denote paths from tree roots to leaves. *)
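
(** To see the lookup scheme concretely, we can try [varmap_find] on a hand-built map with three entries.  A sketch:

[[
Eval compute in varmap_find False (Left_idx End_idx)
  (Node_vm (0 = 0) (Node_vm (1 = 1) (Empty_vm Prop) (Empty_vm Prop))
     (Node_vm (2 = 2) (Empty_vm Prop) (Empty_vm Prop))).
 
]]

This should reduce to [1 = 1], the value stored at the root of the left subtree; [End_idx] would select the root value, and [Right_idx End_idx] the right subtree's root. *)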

Theorem mt3 : forall x y z,
  (x < y /\ y > z) \/ (y > z /\ x < S y)
  --> y > z /\ (x < y \/ x < S y).
  my_tauto.
Qed.

Print mt3.
(** %\vspace{-.15in}% [[
fun x y z : nat =>
partialOut
  (my_tauto
     (Node_vm (x < S y) (Node_vm (x < y) (Empty_vm Prop) (Empty_vm Prop))
        (Node_vm (y > z) (Empty_vm Prop) (Empty_vm Prop)))
     (Imp
        (Or (And (Atomic (Left_idx End_idx)) (Atomic (Right_idx End_idx)))
           (And (Atomic (Right_idx End_idx)) (Atomic End_idx)))
        (And (Atomic (Right_idx End_idx))
           (Or (Atomic (Left_idx End_idx)) (Atomic End_idx)))))
     : forall x y z : nat,
       x < y /\ y > z \/ y > z /\ x < S y --> y > z /\ (x < y \/ x < S y)
 
]]

Our goal contained three distinct atomic formulas, and we see that a three-element [varmap] is generated.

It can be interesting to observe differences between the level of repetition in proof terms generated by [my_tauto] and [tauto] for especially trivial theorems. *)

Theorem mt4 : True /\ True /\ True /\ True /\ True /\ True /\ False --> False.
  my_tauto.
Qed.

Print mt4.
(** %\vspace{-.15in}% [[
mt4 =
partialOut
  (my_tauto (Empty_vm Prop)
     (Imp
        (And Truth
           (And Truth
              (And Truth (And Truth (And Truth (And Truth Falsehood))))))
        Falsehood))
     : True /\ True /\ True /\ True /\ True /\ True /\ False --> False
]] *)

Theorem mt4' : True /\ True /\ True /\ True /\ True /\ True /\ False -> False.
  tauto.
Qed.

Print mt4'.
(** %\vspace{-.15in}% [[
mt4' =
fun H : True /\ True /\ True /\ True /\ True /\ True /\ False =>
and_ind
  (fun (_ : True) (H1 : True /\ True /\ True /\ True /\ True /\ False) =>
   and_ind
     (fun (_ : True) (H3 : True /\ True /\ True /\ True /\ False) =>
      and_ind
        (fun (_ : True) (H5 : True /\ True /\ True /\ False) =>
         and_ind
           (fun (_ : True) (H7 : True /\ True /\ False) =>
            and_ind
              (fun (_ : True) (H9 : True /\ False) =>
               and_ind (fun (_ : True) (H11 : False) => False_ind False H11)
                 H9) H7) H5) H3) H1) H
     : True /\ True /\ True /\ True /\ True /\ True /\ False -> False
]] *)

(** * Exercises *)

(** remove printing * *)

(** %\begin{enumerate}%#<ol>#

%\item%#<li># Implement a reflective procedure for normalizing systems of linear equations over rational numbers.  In particular, the tactic should identify all hypotheses that are linear equations over rationals where the equation righthand sides are constants.  It should normalize each hypothesis to have a lefthand side that is a sum of products of constants and variables, with no variable appearing multiple times.  Then, your tactic should add together all of these equations to form a single new equation, possibly clearing the original equations.  Some coefficients may cancel in the addition, reducing the number of variables that appear.

To work with rational numbers, import module [QArith] and use [Local Open Scope Q_scope].  All of the usual arithmetic operator notations will then work with rationals, and there are shorthands for constants 0 and 1.  Other rationals must be written as [num # den] for numerator [num] and denominator [den].  Use the infix operator [==] in place of [=], to deal with different ways of expressing the same number as a fraction.  For instance, a theorem and proof like this one should work with your tactic:

[[
  Theorem t2 : forall x y z, (2 # 1) * (x - (3 # 2) * y) == 15 # 1
    -> z + (8 # 1) * x == 20 # 1
    -> (-6 # 2) * y + (10 # 1) * x + z == 35 # 1.
    intros; reflectContext; assumption.
  Qed.
 
]]

Your solution can work in any way that involves reflecting syntax and doing most calculation with a Gallina function.  These hints outline a particular possible solution.  Throughout, the [ring] tactic will be helpful for proving many simple facts about rationals, and tactics like [rewrite] are correctly overloaded to work with rational equality [==].

%\begin{enumerate}%#<ol>#
%\item%#<li># Define an inductive type [exp] of expressions over rationals (which inhabit the Coq type [Q]).  Include variables (represented as natural numbers), constants, addition, subtraction, and multiplication.#</li>#
%\item%#<li># Define a function [lookup] for reading an element out of a list of rationals, by its position in the list.#</li>#
%\item%#<li># Define a function [expDenote] that translates [exp]s, along with lists of rationals representing variable values, to [Q].#</li>#
%\item%#<li># Define a recursive function [eqsDenote] over [list (exp * Q)], characterizing when all of the equations are true.#</li>#
%\item%#<li># Fix a representation [lhs] of flattened expressions.  Where [len] is the number of variables, represent a flattened equation as [ilist Q len].  Each position of the list gives the coefficient of the corresponding variable.#</li>#
%\item%#<li># Write a recursive function [linearize] that takes a constant [k] and an expression [e] and optionally returns an [lhs] equivalent to [k * e].  This function returns [None] when it discovers that the input expression is not linear.  The parameter [len] of [lhs] should be a parameter of [linearize], too.  The functions [singleton], [everywhere], and [map2] from [DepList] will probably be helpful.  It is also helpful to know that [Qplus] is the identifier for rational addition.#</li>#
%\item%#<li># Write a recursive function [linearizeEqs : list (exp * Q) -> option (lhs * Q)].  This function linearizes all of the equations in the list in turn, building up the sum of the equations.  It returns [None] if the linearization of any constituent equation fails.#</li>#
%\item%#<li># Define a denotation function for [lhs].#</li>#
%\item%#<li># Prove that, when [exp] linearization succeeds on constant [k] and expression [e], the linearized version has the same meaning as [k * e].#</li>#
%\item%#<li># Prove that, when [linearizeEqs] succeeds on an equation list [eqs], then the final summed-up equation is true whenever the original equation list is true.#</li>#
%\item%#<li># Write a tactic [findVarsHyps] to search through all equalities on rationals in the context, recursing through addition, subtraction, and multiplication to find the list of expressions that should be treated as variables.  This list should be suitable as an argument to [expDenote] and [eqsDenote], associating a [Q] value to each natural number that stands for a variable.#</li>#
%\item%#<li># Write a tactic [reflect] to reflect a [Q] expression into [exp], with respect to a given list of variable values.#</li>#
%\item%#<li># Write a tactic [reflectEqs] to reflect a formula that begins with a sequence of implications from linear equalities whose lefthand sides are expressed with [expDenote].  This tactic should build a [list (exp * Q)] representing the equations.  Remember to give an explicit type annotation when returning a nil list, as in [constr:(@nil (exp * Q))].#</li>#
%\item%#<li># Now this final tactic should do the job:

[[
  Ltac reflectContext :=
    let ls := findVarsHyps in
      repeat match goal with
               | [ H : ?e == ?num # ?den |- _ ] =>
                 let r := reflect ls e in
                   change (expDenote ls r == num # den) in H;
                     generalize H
             end;
      match goal with
        | [ |- ?g ] => let re := reflectEqs g in
            intros;
              let H := fresh "H" in
                assert (H : eqsDenote ls re); [ simpl in *; tauto
                  | repeat match goal with
                             | [ H : expDenote _ _ == _ |- _ ] => clear H
                           end;
                    generalize (linearizeEqsCorrect ls re H); clear H; simpl;
                      match goal with
                        | [ |- ?X == ?Y -> _ ] =>
                          ring_simplify X Y; intro
                      end ]
      end.
 
]]

#</ol>#%\end{enumerate}%
#</li>#

#</ol>#%\end{enumerate}% *)