annotate src/Reflection.v @ 167:ef485f86a7b6

Cleaning up BigStep proofs
author Adam Chlipala <adamc@hcoop.net>
date Wed, 05 Nov 2008 09:29:15 -0500
parents e71904f61e69
children f05514cc6c0d
adamc@142 1 (* Copyright (c) 2008, Adam Chlipala
adamc@142 2 *
adamc@142 3 * This work is licensed under a
adamc@142 4 * Creative Commons Attribution-Noncommercial-No Derivative Works 3.0
adamc@142 5 * Unported License.
adamc@142 6 * The license text is available at:
adamc@142 7 * http://creativecommons.org/licenses/by-nc-nd/3.0/
adamc@142 8 *)
adamc@142 9
adamc@142 10 (* begin hide *)
adamc@142 11 Require Import List.
adamc@142 12
adamc@142 13 Require Import Tactics MoreSpecif.
adamc@142 14
adamc@142 15 Set Implicit Arguments.
adamc@142 16 (* end hide *)
adamc@142 17
adamc@142 18
adamc@142 19 (** %\chapter{Proof by Reflection}% *)
adamc@142 20
adamc@142 21 (** The last chapter highlighted a very heuristic approach to proving. In this chapter, we will study an alternative technique, %\textit{%#<i>#proof by reflection#</i>#%}%. We will write, in Gallina, decision procedures with proofs of correctness, and we will appeal to these procedures in writing very short proofs. Such a proof is checked by running the decision procedure. The term %\textit{%#<i>#reflection#</i>#%}% applies because we will need to translate Gallina propositions into values of inductive types representing syntax, so that Gallina programs may analyze them. *)
adamc@142 22
adamc@142 23
adamc@142 24 (** * Proving Evenness *)
adamc@142 25
adamc@142 26 (** Proving that particular natural number constants are even is certainly something we would rather have happen automatically. The Ltac-programming techniques that we learned in the last chapter make it easy to implement such a procedure. *)
adamc@142 27
adamc@142 28 Inductive isEven : nat -> Prop :=
adamc@144 29 | Even_O : isEven O
adamc@144 30 | Even_SS : forall n, isEven n -> isEven (S (S n)).
adamc@142 31
adamc@148 32 (* begin thide *)
adamc@142 33 Ltac prove_even := repeat constructor.
adamc@148 34 (* end thide *)
adamc@142 35
adamc@142 36 Theorem even_256 : isEven 256.
adamc@142 37 prove_even.
adamc@142 38 Qed.
adamc@142 39
adamc@142 40 Print even_256.
adamc@142 41 (** [[
adamc@142 42
adamc@142 43 even_256 =
adamc@142 44 Even_SS
adamc@142 45 (Even_SS
adamc@142 46 (Even_SS
adamc@142 47 (Even_SS
adamc@142 48 ]]
adamc@142 49
adamc@142 50 ...and so on. This procedure always works (at least on machines with infinite resources), but it has a serious drawback, which we see when we print the proof it generates that 256 is even. The final proof term has length linear in the input value. This seems like a shame, since we could write a trivial and trustworthy program to verify evenness of constants. The proof checker could simply call our program where needed.
adamc@142 51
adamc@142 52 It is also unfortunate not to have static typing guarantees that our tactic always behaves appropriately. Other invocations of similar tactics might fail with dynamic type errors, and we would not know about the bugs behind these errors until we happened to attempt to prove complex enough goals.
adamc@142 53
adamc@142 54 The techniques of proof by reflection address both complaints. We will be able to write proofs like this with constant size overhead beyond the size of the input, and we will do it with verified decision procedures written in Gallina.
adamc@142 55
adamc@142 56 For this example, we begin by using a type from the [MoreSpecif] module to write a certified evenness checker. *)
adamc@142 57
adamc@142 58 Print partial.
adamc@142 59 (** [[
adamc@142 60
adamc@142 61 Inductive partial (P : Prop) : Set := Proved : P -> [P] | Uncertain : [P]
adamc@142 62 ]] *)
adamc@142 63
adamc@142 64 (** A [partial P] value is an optional proof of [P]. The notation [[P]] stands for [partial P]. *)
adamc@142 65
adamc@142 66 Open Local Scope partial_scope.
adamc@142 67
adamc@142 68 (** We bring into scope some notations for the [partial] type. These overlap with some of the notations we have seen previously for specification types, so they were placed in a separate scope that needs separate opening. *)
adamc@142 69
adamc@148 70 (* begin thide *)
adamc@142 71 Definition check_even (n : nat) : [isEven n].
adamc@142 72 Hint Constructors isEven.
adamc@142 73
adamc@142 74 refine (fix F (n : nat) : [isEven n] :=
adamc@142 75 match n return [isEven n] with
adamc@142 76 | 0 => Yes
adamc@142 77 | 1 => No
adamc@142 78 | S (S n') => Reduce (F n')
adamc@142 79 end); auto.
adamc@142 80 Defined.
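
(** As a quick sanity check (an illustration added here, not part of the original development), we can run the checker on small constants.  Assuming the [MoreSpecif] notations reduce as expected, [check_even 4] computes to a [Proved] value carrying an explicit evenness derivation, while [check_even 5] computes to [Uncertain]. *)

Eval compute in check_even 4.
Eval compute in check_even 5.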
adamc@142 81
adamc@142 82 (** We can use dependent pattern-matching to write a function that performs a surprising feat. When given a [partial P], this function [partialOut] returns a proof of [P] if the [partial] value contains a proof, and it returns a (useless) proof of [True] otherwise. From the standpoint of ML and Haskell programming, it seems impossible to write such a type, but it is trivial with a [return] annotation. *)
adamc@142 83
adamc@142 84 Definition partialOut (P : Prop) (x : [P]) :=
adamc@142 85 match x return (match x with
adamc@142 86 | Proved _ => P
adamc@142 87 | Uncertain => True
adamc@142 88 end) with
adamc@142 89 | Proved pf => pf
adamc@142 90 | Uncertain => I
adamc@142 91 end.
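
(** For instance (a small illustration added here; the name [even_4] is ours), the type of [partialOut (check_even 4)] is convertible to [isEven 4], because [check_even 4] reduces to a [Proved] value, so we can already build a reflective proof by hand: *)

Definition even_4 : isEven 4 := partialOut (check_even 4).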
adamc@142 92
adamc@142 93 (** It may seem strange to define a function like this. However, it turns out to be very useful in writing a reflective version of our earlier [prove_even] tactic: *)
adamc@142 94
adamc@142 95 Ltac prove_even_reflective :=
adamc@142 96 match goal with
adamc@142 97 | [ |- isEven ?N] => exact (partialOut (check_even N))
adamc@142 98 end.
adamc@148 99 (* end thide *)
adamc@142 100
adamc@142 101 (** We identify which natural number we are considering, and we "prove" its evenness by pulling the proof out of the appropriate [check_even] call. *)
adamc@142 102
adamc@142 103 Theorem even_256' : isEven 256.
adamc@142 104 prove_even_reflective.
adamc@142 105 Qed.
adamc@142 106
adamc@142 107 Print even_256'.
adamc@142 108 (** [[
adamc@142 109
adamc@142 110 even_256' = partialOut (check_even 256)
adamc@142 111 : isEven 256
adamc@142 112 ]]
adamc@142 113
adamc@142 114 We can see a constant wrapper around the object of the proof. For any even number, this form of proof will suffice. What happens if we try the tactic with an odd number? *)
adamc@142 115
adamc@142 116 Theorem even_255 : isEven 255.
adamc@142 117 (** [[
adamc@142 118
adamc@142 119 prove_even_reflective.
adamc@142 120
]]

adamc@142 121 [[
adamc@142 122
adamc@142 123 User error: No matching clauses for match goal
adamc@142 124 ]]
adamc@142 125
adamc@142 126 Thankfully, the tactic fails. To see more precisely what goes wrong, we can manually run the body of the [match].
adamc@142 127
adamc@142 128 [[
adamc@142 129
adamc@142 130 exact (partialOut (check_even 255)).
adamc@142 131
]]

adamc@142 132 [[
adamc@142 133
adamc@142 134 Error: The term "partialOut (check_even 255)" has type
adamc@142 135 "match check_even 255 with
adamc@142 136 | Yes => isEven 255
adamc@142 137 | No => True
adamc@142 138 end" while it is expected to have type "isEven 255"
adamc@142 139 ]]
adamc@142 140
adamc@142 141 As usual, the type-checker performs no reductions to simplify error messages. If we reduced the first term ourselves, we would see that [check_even 255] reduces to a [No], so that the first term is equivalent to [True], which certainly does not unify with [isEven 255]. *)
adamc@142 142 Abort.
adamc@143 143
adamc@143 144
adamc@143 145 (** * Reflecting the Syntax of a Trivial Tautology Language *)
adamc@143 146
adamc@143 147 (** We might also like to have reflective proofs of trivial tautologies like this one: *)
adamc@143 148
adamc@143 149 Theorem true_galore : (True /\ True) -> (True \/ (True /\ (True -> True))).
adamc@143 150 tauto.
adamc@143 151 Qed.
adamc@143 152
adamc@143 153 Print true_galore.
adamc@143 154
adamc@143 155 (** [[
adamc@143 156
adamc@143 157 true_galore =
adamc@143 158 fun H : True /\ True =>
adamc@143 159 and_ind (fun _ _ : True => or_introl (True /\ (True -> True)) I) H
adamc@143 160 : True /\ True -> True \/ True /\ (True -> True)
adamc@143 161 ]]
adamc@143 162
adamc@143 163 As we might expect, the proof that [tauto] builds contains explicit applications of natural deduction rules. For large formulas, this can add a linear amount of proof size overhead, beyond the size of the input.
adamc@143 164
adamc@143 165 To write a reflective procedure for this class of goals, we will need to get into the actual "reflection" part of "proof by reflection." It is impossible to case-analyze a [Prop] in any way in Gallina. We must %\textit{%#<i>#reflect#</i>#%}% [Prop] into some type that we %\textit{%#<i>#can#</i>#%}% analyze. This inductive type is a good candidate: *)
adamc@143 166
adamc@148 167 (* begin thide *)
adamc@143 168 Inductive taut : Set :=
adamc@143 169 | TautTrue : taut
adamc@143 170 | TautAnd : taut -> taut -> taut
adamc@143 171 | TautOr : taut -> taut -> taut
adamc@143 172 | TautImp : taut -> taut -> taut.
adamc@143 173
adamc@143 174 (** We write a recursive function to "unreflect" this syntax back to [Prop]. *)
adamc@143 175
adamc@143 176 Fixpoint tautDenote (t : taut) : Prop :=
adamc@143 177 match t with
adamc@143 178 | TautTrue => True
adamc@143 179 | TautAnd t1 t2 => tautDenote t1 /\ tautDenote t2
adamc@143 180 | TautOr t1 t2 => tautDenote t1 \/ tautDenote t2
adamc@143 181 | TautImp t1 t2 => tautDenote t1 -> tautDenote t2
adamc@143 182 end.
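
(** For example (an added illustration), the syntax tree [TautImp TautTrue (TautAnd TautTrue TautTrue)] denotes the proposition [True -> True /\ True]: *)

Eval simpl in tautDenote (TautImp TautTrue (TautAnd TautTrue TautTrue)).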
adamc@143 183
adamc@143 184 (** It is easy to prove that every formula in the range of [tautDenote] is true. *)
adamc@143 185
adamc@143 186 Theorem tautTrue : forall t, tautDenote t.
adamc@143 187 induction t; crush.
adamc@143 188 Qed.
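
(** Applying [tautTrue] to the syntax tree above already proves the corresponding proposition, since the statement below is convertible to that tree's [tautDenote] image.  (This theorem is an editorial illustration; the name [true_imp_and] does not appear in the original file.) *)

Theorem true_imp_and : True -> True /\ True.
  exact (tautTrue (TautImp TautTrue (TautAnd TautTrue TautTrue))).
Qed.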
adamc@143 189
adamc@143 190 (** To use [tautTrue] to prove particular formulas, we need to implement the syntax reflection process. A recursive Ltac function does the job. *)
adamc@143 191
adamc@143 192 Ltac tautReflect P :=
adamc@143 193 match P with
adamc@143 194 | True => TautTrue
adamc@143 195 | ?P1 /\ ?P2 =>
adamc@143 196 let t1 := tautReflect P1 in
adamc@143 197 let t2 := tautReflect P2 in
adamc@143 198 constr:(TautAnd t1 t2)
adamc@143 199 | ?P1 \/ ?P2 =>
adamc@143 200 let t1 := tautReflect P1 in
adamc@143 201 let t2 := tautReflect P2 in
adamc@143 202 constr:(TautOr t1 t2)
adamc@143 203 | ?P1 -> ?P2 =>
adamc@143 204 let t1 := tautReflect P1 in
adamc@143 205 let t2 := tautReflect P2 in
adamc@143 206 constr:(TautImp t1 t2)
adamc@143 207 end.
adamc@143 208
adamc@143 209 (** With [tautReflect] available, it is easy to finish our reflective tactic. We look at the goal formula, reflect it, and apply [tautTrue] to the reflected formula. *)
adamc@143 210
adamc@143 211 Ltac obvious :=
adamc@143 212 match goal with
adamc@143 213 | [ |- ?P ] =>
adamc@143 214 let t := tautReflect P in
adamc@143 215 exact (tautTrue t)
adamc@143 216 end.
adamc@143 217
adamc@143 218 (** We can verify that [obvious] solves our original example, with a proof term that does not mention details of the proof. *)
adamc@148 219 (* end thide *)
adamc@143 220
adamc@143 221 Theorem true_galore' : (True /\ True) -> (True \/ (True /\ (True -> True))).
adamc@143 222 obvious.
adamc@143 223 Qed.
adamc@143 224
adamc@143 225 Print true_galore'.
adamc@143 226
adamc@143 227 (** [[
adamc@143 228
adamc@143 229 true_galore' =
adamc@143 230 tautTrue
adamc@143 231 (TautImp (TautAnd TautTrue TautTrue)
adamc@143 232 (TautOr TautTrue (TautAnd TautTrue (TautImp TautTrue TautTrue))))
adamc@143 233 : True /\ True -> True \/ True /\ (True -> True)
adamc@143 234
adamc@143 235 ]]
adamc@143 236
adamc@143 237 It is worth considering how the reflective tactic improves on a pure-Ltac implementation. The formula reflection process is just as ad-hoc as before, so we gain little there. In general, proofs will be more complicated than formula translation, and the "generic proof rule" that we apply here %\textit{%#<i>#is#</i>#%}% on much better formal footing than a recursive Ltac function. The dependent type of the proof guarantees that it "works" on any input formula. This is all in addition to the proof-size improvement that we have already seen. *)
adamc@144 238
adamc@144 239
adamc@145 240 (** * A Monoid Expression Simplifier *)
adamc@145 241
adamc@146 242 (** Proof by reflection does not require encoding of all of the syntax in a goal. We can insert "variables" in our syntax types to allow injection of arbitrary pieces, even if we cannot apply specialized reasoning to them. In this section, we explore that possibility by writing a tactic for normalizing monoid equations. *)
adamc@146 243
adamc@145 244 Section monoid.
adamc@145 245 Variable A : Set.
adamc@145 246 Variable e : A.
adamc@145 247 Variable f : A -> A -> A.
adamc@145 248
adamc@145 249 Infix "+" := f.
adamc@145 250
adamc@145 251 Hypothesis assoc : forall a b c, (a + b) + c = a + (b + c).
adamc@145 252 Hypothesis identl : forall a, e + a = a.
adamc@145 253 Hypothesis identr : forall a, a + e = a.
adamc@145 254
adamc@146 255 (** We add variables and hypotheses characterizing an arbitrary instance of the algebraic structure of monoids. We have an associative binary operator and an identity element for it.
adamc@146 256
adamc@146 257 It is easy to define an expression tree type for monoid expressions. A [Var] constructor is a "catch-all" case for subexpressions that we cannot model. These subexpressions could be actual Gallina variables, or they could just use functions that our tactic is unable to understand. *)
adamc@146 258
adamc@148 259 (* begin thide *)
adamc@145 260 Inductive mexp : Set :=
adamc@145 261 | Ident : mexp
adamc@145 262 | Var : A -> mexp
adamc@145 263 | Op : mexp -> mexp -> mexp.
adamc@145 264
adamc@146 265 (** Next, we write an "un-reflect" function. *)
adamc@146 266
adamc@145 267 Fixpoint mdenote (me : mexp) : A :=
adamc@145 268 match me with
adamc@145 269 | Ident => e
adamc@145 270 | Var v => v
adamc@145 271 | Op me1 me2 => mdenote me1 + mdenote me2
adamc@145 272 end.
adamc@145 273
adamc@146 274 (** We will normalize expressions by flattening them into lists, via associativity, so it is helpful to have a denotation function for lists of monoid values. *)
adamc@146 275
adamc@145 276 Fixpoint mldenote (ls : list A) : A :=
adamc@145 277 match ls with
adamc@145 278 | nil => e
adamc@145 279 | x :: ls' => x + mldenote ls'
adamc@145 280 end.
adamc@145 281
adamc@146 282 (** The flattening function itself is easy to implement. *)
adamc@146 283
adamc@145 284 Fixpoint flatten (me : mexp) : list A :=
adamc@145 285 match me with
adamc@145 286 | Ident => nil
adamc@145 287 | Var x => x :: nil
adamc@145 288 | Op me1 me2 => flatten me1 ++ flatten me2
adamc@145 289 end.
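
(** For instance (an added check), for variables [x] and [y], flattening [Op (Op (Var x) Ident) (Var y)] yields the flat list [x :: y :: nil]: *)

Eval simpl in fun x y : A => flatten (Op (Op (Var x) Ident) (Var y)).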
adamc@145 290
adamc@146 291 (** [flatten] has a straightforward correctness proof in terms of our [denote] functions. *)
adamc@146 292
adamc@146 293 Lemma flatten_correct' : forall ml2 ml1,
adamc@146 294 mldenote ml1 + mldenote ml2 = mldenote (ml1 ++ ml2).
adamc@145 295 induction ml1; crush.
adamc@145 296 Qed.
adamc@145 297
adamc@145 298 Theorem flatten_correct : forall me, mdenote me = mldenote (flatten me).
adamc@145 299 Hint Resolve flatten_correct'.
adamc@145 300
adamc@145 301 induction me; crush.
adamc@145 302 Qed.
adamc@145 303
adamc@146 304 (** Now it is easy to prove a theorem that will be the main tool behind our simplification tactic. *)
adamc@146 305
adamc@146 306 Theorem monoid_reflect : forall me1 me2,
adamc@146 307 mldenote (flatten me1) = mldenote (flatten me2)
adamc@146 308 -> mdenote me1 = mdenote me2.
adamc@145 309 intros; repeat rewrite flatten_correct; assumption.
adamc@145 310 Qed.
adamc@145 311
adamc@146 312 (** We implement reflection into the [mexp] type. *)
adamc@146 313
adamc@146 314 Ltac reflect me :=
adamc@146 315 match me with
adamc@145 316 | e => Ident
adamc@146 317 | ?me1 + ?me2 =>
adamc@146 318 let r1 := reflect me1 in
adamc@146 319 let r2 := reflect me2 in
adamc@145 320 constr:(Op r1 r2)
adamc@146 321 | _ => constr:(Var me)
adamc@145 322 end.
adamc@145 323
adamc@146 324 (** The final [monoid] tactic works on goals that equate two monoid terms. We reflect each and change the goal to refer to the reflected versions, finishing off by applying [monoid_reflect] and simplifying uses of [mldenote]. *)
adamc@146 325
adamc@145 326 Ltac monoid :=
adamc@145 327 match goal with
adamc@146 328 | [ |- ?me1 = ?me2 ] =>
adamc@146 329 let r1 := reflect me1 in
adamc@146 330 let r2 := reflect me2 in
adamc@145 331 change (mdenote r1 = mdenote r2);
adamc@145 332 apply monoid_reflect; simpl mldenote
adamc@145 333 end.
adamc@145 334
adamc@146 335 (** We can make short work of theorems like this one: *)
adamc@146 336
adamc@148 337 (* end thide *)
adamc@148 338
adamc@145 339 Theorem t1 : forall a b c d, a + b + c + d = a + (b + c) + d.
adamc@146 340 intros; monoid.
adamc@146 341 (** [[
adamc@146 342
adamc@146 343 ============================
adamc@146 344 a + (b + (c + (d + e))) = a + (b + (c + (d + e)))
adamc@146 345 ]]
adamc@146 346
adamc@146 347 [monoid] has canonicalized both sides of the equality, such that we can finish the proof by reflexivity. *)
adamc@146 348
adamc@145 349 reflexivity.
adamc@145 350 Qed.
adamc@146 351
adamc@146 352 (** It is interesting to look at the form of the proof. *)
adamc@146 353
adamc@146 354 Print t1.
adamc@146 355 (** [[
adamc@146 356
adamc@146 357 t1 =
adamc@146 358 fun a b c d : A =>
adamc@146 359 monoid_reflect (Op (Op (Op (Var a) (Var b)) (Var c)) (Var d))
adamc@146 360 (Op (Op (Var a) (Op (Var b) (Var c))) (Var d))
adamc@146 361 (refl_equal (a + (b + (c + (d + e)))))
adamc@146 362 : forall a b c d : A, a + b + c + d = a + (b + c) + d
adamc@146 363 ]]
adamc@146 364
adamc@146 365 The proof term contains only restatements of the equality operands in reflected form, followed by a use of reflexivity on the shared canonical form. *)
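
(** The tactic also handles occurrences of the identity [e], since [reflect] maps them to [Ident] and [flatten] drops them.  (This extra theorem is an editorial addition; the name [t2] is ours.) *)

Theorem t2 : forall a b, a + e + b = a + b.
  intros; monoid; reflexivity.
Qed.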
adamc@145 366 End monoid.
adamc@145 367
adamc@146 368 (** Extensions of this basic approach are used in the implementations of the [ring] and [field] tactics that come packaged with Coq. *)
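
(** For comparison (an aside added here, not part of the original text; the module name [ArithRing] and the example theorem are our own choices), the stock [ring] tactic performs a similar normalization for the semiring of natural numbers: *)

Require Import ArithRing.

Theorem nat_semiring_example : forall a b c : nat, (a + b) * c = c * a + b * c.
  intros; ring.
Qed.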
adamc@146 369
adamc@145 370
adamc@144 371 (** * A Smarter Tautology Solver *)
adamc@144 372
adamc@147 373 (** Now we are ready to revisit our earlier tautology solver example. We want to broaden the scope of the tactic to include formulas whose truth is not syntactically apparent. We will want to allow injection of arbitrary formulas, just as we allowed arbitrary monoid expressions in the last example. Since we are working in a richer theory, it is important to be able to use equalities between different injected formulas. For instance, we cannot prove [P -> P] by translating the formula into a value like [Imp (Var P) (Var P)], because a Gallina function has no way of comparing the two [P]s for equality.
adamc@147 374
adamc@147 375 To arrive at a nice implementation satisfying these criteria, we introduce the [quote] tactic and its associated library. *)
adamc@147 376
adamc@144 377 Require Import Quote.
adamc@144 378
adamc@148 379 (* begin thide *)
adamc@144 380 Inductive formula : Set :=
adamc@144 381 | Atomic : index -> formula
adamc@144 382 | Truth : formula
adamc@144 383 | Falsehood : formula
adamc@144 384 | And : formula -> formula -> formula
adamc@144 385 | Or : formula -> formula -> formula
adamc@144 386 | Imp : formula -> formula -> formula.
adamc@144 387
adamc@147 388 (** The type [index] comes from the [Quote] library and represents a countable variable type. The rest of [formula]'s definition should be old hat by now.
adamc@147 389
adamc@147 390 The [quote] tactic will implement injection from [Prop] into [formula] for us, but it is not quite as smart as we might like. In particular, it interprets implications incorrectly, so we will need to declare a wrapper definition for implication, as we did in the last chapter. *)
adamc@144 391
adamc@144 392 Definition imp (P1 P2 : Prop) := P1 -> P2.
adamc@144 393 Infix "-->" := imp (no associativity, at level 95).
adamc@144 394
adamc@147 395 (** Now we can define our denotation function. *)
adamc@147 396
adamc@147 397 Definition asgn := varmap Prop.
adamc@147 398
adamc@144 399 Fixpoint formulaDenote (atomics : asgn) (f : formula) : Prop :=
adamc@144 400 match f with
adamc@144 401 | Atomic v => varmap_find False v atomics
adamc@144 402 | Truth => True
adamc@144 403 | Falsehood => False
adamc@144 404 | And f1 f2 => formulaDenote atomics f1 /\ formulaDenote atomics f2
adamc@144 405 | Or f1 f2 => formulaDenote atomics f1 \/ formulaDenote atomics f2
adamc@144 406 | Imp f1 f2 => formulaDenote atomics f1 --> formulaDenote atomics f2
adamc@144 407 end.
adamc@144 408
adamc@147 409 (** The [varmap] type family implements maps from [index] values. In this case, we define an assignment as a map from variables to [Prop]s. [formulaDenote] works with an assignment, and we use the [varmap_find] function to consult the assignment in the [Atomic] case. The first argument to [varmap_find] is a default value, in case the variable is not found. *)
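
(** For example (an added illustration), with a one-node assignment mapping [End_idx] to the proposition [1 = 1], the formula [Imp (Atomic End_idx) Truth] denotes [1 = 1 --> True]: *)

Eval simpl in formulaDenote (Node_vm (1 = 1) (Empty_vm Prop) (Empty_vm Prop))
  (Imp (Atomic End_idx) Truth).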
adamc@147 410
adamc@144 411 Section my_tauto.
adamc@144 412 Variable atomics : asgn.
adamc@144 413
adamc@144 414 Definition holds (v : index) := varmap_find False v atomics.
adamc@144 415
adamc@147 416 (** We define some shorthand for a particular variable being true, and now we are ready to define some helpful functions based on the [ListSet] module of the standard library, which (unsurprisingly) presents a view of lists as sets. *)
adamc@147 417
adamc@144 418 Require Import ListSet.
adamc@144 419
adamc@144 420 Definition index_eq : forall x y : index, {x = y} + {x <> y}.
adamc@144 421 decide equality.
adamc@144 422 Defined.
adamc@144 423
adamc@144 424 Definition add (s : set index) (v : index) := set_add index_eq v s.
adamc@147 425
adamc@144 426 Definition In_dec : forall v (s : set index), {In v s} + {~In v s}.
adamc@144 427 Open Local Scope specif_scope.
adamc@144 428
adamc@144 429 intro; refine (fix F (s : set index) : {In v s} + {~In v s} :=
adamc@144 430 match s return {In v s} + {~In v s} with
adamc@144 431 | nil => No
adamc@144 432 | v' :: s' => index_eq v' v || F s'
adamc@144 433 end); crush.
adamc@144 434 Defined.
adamc@144 435
adamc@147 436 (** We define what it means for all members of an index set to represent true propositions, and we prove some lemmas about this notion. *)
adamc@147 437
adamc@144 438 Fixpoint allTrue (s : set index) : Prop :=
adamc@144 439 match s with
adamc@144 440 | nil => True
adamc@144 441 | v :: s' => holds v /\ allTrue s'
adamc@144 442 end.
adamc@144 443
adamc@144 444 Theorem allTrue_add : forall v s,
adamc@144 445 allTrue s
adamc@144 446 -> holds v
adamc@144 447 -> allTrue (add s v).
adamc@144 448 induction s; crush;
adamc@144 449 match goal with
adamc@144 450 | [ |- context[if ?E then _ else _] ] => destruct E
adamc@144 451 end; crush.
adamc@144 452 Qed.
adamc@144 453
adamc@144 454 Theorem allTrue_In : forall v s,
adamc@144 455 allTrue s
adamc@144 456 -> set_In v s
adamc@144 457 -> varmap_find False v atomics.
adamc@144 458 induction s; crush.
adamc@144 459 Qed.
adamc@144 460
adamc@144 461 Hint Resolve allTrue_add allTrue_In.
adamc@144 462
adamc@144 463 Open Local Scope partial_scope.
adamc@144 464
adamc@147 465 (** Now we can write a function [forward] which implements deconstruction of hypotheses. It has a dependent type, in the style of Chapter 6, guaranteeing correctness. The arguments to [forward] are a goal formula [f], a set [known] of atomic formulas that we may assume are true, a hypothesis formula [hyp], and a success continuation [cont] that we call when we have extended [known] to hold new truths implied by [hyp]. *)
adamc@147 466
adamc@144 467 Definition forward (f : formula) (known : set index) (hyp : formula)
adamc@144 468 (cont : forall known', [allTrue known' -> formulaDenote atomics f])
adamc@144 469 : [allTrue known -> formulaDenote atomics hyp -> formulaDenote atomics f].
adamc@144 470 refine (fix F (f : formula) (known : set index) (hyp : formula)
adamc@144 471 (cont : forall known', [allTrue known' -> formulaDenote atomics f]){struct hyp}
adamc@144 472 : [allTrue known -> formulaDenote atomics hyp -> formulaDenote atomics f] :=
adamc@144 473 match hyp return [allTrue known -> formulaDenote atomics hyp -> formulaDenote atomics f] with
adamc@144 474 | Atomic v => Reduce (cont (add known v))
adamc@144 475 | Truth => Reduce (cont known)
adamc@144 476 | Falsehood => Yes
adamc@144 477 | And h1 h2 =>
adamc@144 478 Reduce (F (Imp h2 f) known h1 (fun known' =>
adamc@144 479 Reduce (F f known' h2 cont)))
adamc@144 480 | Or h1 h2 => F f known h1 cont && F f known h2 cont
adamc@144 481 | Imp _ _ => Reduce (cont known)
adamc@144 482 end); crush.
adamc@144 483 Defined.
adamc@144 484
adamc@147 485 (** A [backward] function implements analysis of the final goal. It calls [forward] to handle implications. *)
adamc@147 486
adamc@144 487 Definition backward (known : set index) (f : formula) : [allTrue known -> formulaDenote atomics f].
adamc@144 488 refine (fix F (known : set index) (f : formula) : [allTrue known -> formulaDenote atomics f] :=
adamc@144 489 match f return [allTrue known -> formulaDenote atomics f] with
adamc@144 490 | Atomic v => Reduce (In_dec v known)
adamc@144 491 | Truth => Yes
adamc@144 492 | Falsehood => No
adamc@144 493 | And f1 f2 => F known f1 && F known f2
adamc@144 494 | Or f1 f2 => F known f1 || F known f2
adamc@144 495 | Imp f1 f2 => forward f2 known f1 (fun known' => F known' f2)
adamc@144 496 end); crush; eauto.
adamc@144 497 Defined.
adamc@144 498
adamc@147 499 (** A simple wrapper around [backward] gives us the usual type of a partial decision procedure. *)
adamc@147 500
adamc@144 501 Definition my_tauto (f : formula) : [formulaDenote atomics f].
adamc@144 502 intro; refine (Reduce (backward nil f)); crush.
adamc@144 503 Defined.
adamc@144 504 End my_tauto.
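
(** Before wrapping everything in Ltac, we can already invoke [my_tauto] by hand, supplying an explicit assignment and an explicitly reflected formula.  (This definition is an editorial illustration; the name [true_and_true] is ours.) *)

Definition true_and_true : True /\ True :=
  partialOut (my_tauto (Empty_vm Prop) (And Truth Truth)).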
adamc@144 505
adamc@147 506 (** Our final tactic implementation is now fairly straightforward. First, we [intro] all quantifiers that do not bind [Prop]s. Then we call the [quote] tactic, which implements the reflection for us. Finally, we are able to construct an exact proof via [partialOut] and the [my_tauto] Gallina function. *)
adamc@147 507
adamc@144 508 Ltac my_tauto :=
adamc@144 509 repeat match goal with
adamc@144 510 | [ |- forall x : ?P, _ ] =>
adamc@144 511 match type of P with
adamc@144 512 | Prop => fail 1
adamc@144 513 | _ => intro
adamc@144 514 end
adamc@144 515 end;
adamc@144 516 quote formulaDenote;
adamc@144 517 match goal with
adamc@144 518 | [ |- formulaDenote ?m ?f ] => exact (partialOut (my_tauto m f))
adamc@144 519 end.
adamc@148 520 (* end thide *)
adamc@144 521
adamc@147 522 (** A few examples demonstrate how the tactic works. *)
adamc@147 523
adamc@144 524 Theorem mt1 : True.
adamc@144 525 my_tauto.
adamc@144 526 Qed.
adamc@144 527
adamc@144 528 Print mt1.
adamc@147 529 (** [[
adamc@147 530
adamc@147 531 mt1 = partialOut (my_tauto (Empty_vm Prop) Truth)
adamc@147 532 : True
adamc@147 533 ]]
adamc@147 534
adamc@147 535 We see [my_tauto] applied with an empty [varmap], since every subformula is handled by [formulaDenote]. *)
adamc@144 536
adamc@144 537 Theorem mt2 : forall x y : nat, x = y --> x = y.
adamc@144 538 my_tauto.
adamc@144 539 Qed.
adamc@144 540
adamc@144 541 Print mt2.
adamc@147 542 (** [[
adamc@147 543
adamc@147 544 mt2 =
adamc@147 545 fun x y : nat =>
adamc@147 546 partialOut
adamc@147 547 (my_tauto (Node_vm (x = y) (Empty_vm Prop) (Empty_vm Prop))
adamc@147 548 (Imp (Atomic End_idx) (Atomic End_idx)))
adamc@147 549 : forall x y : nat, x = y --> x = y
adamc@147 550 ]]
adamc@147 551
adamc@147 552 Crucially, both instances of [x = y] are represented with the same index, [End_idx]. The value at this index needs to appear only once in the [varmap], whose form reveals that [varmap]s are represented as binary trees, where [index] values denote paths from tree roots to leaves. *)
adamc@144 553
adamc@144 554 Theorem mt3 : forall x y z,
adamc@144 555 (x < y /\ y > z) \/ (y > z /\ x < S y)
adamc@144 556 --> y > z /\ (x < y \/ x < S y).
adamc@144 557 my_tauto.
adamc@144 558 Qed.
adamc@144 559
adamc@144 560 Print mt3.
adamc@147 561 (** [[
adamc@147 562
adamc@147 563 fun x y z : nat =>
adamc@147 564 partialOut
adamc@147 565 (my_tauto
adamc@147 566 (Node_vm (x < S y) (Node_vm (x < y) (Empty_vm Prop) (Empty_vm Prop))
adamc@147 567 (Node_vm (y > z) (Empty_vm Prop) (Empty_vm Prop)))
adamc@147 568 (Imp
adamc@147 569 (Or (And (Atomic (Left_idx End_idx)) (Atomic (Right_idx End_idx)))
adamc@147 570 (And (Atomic (Right_idx End_idx)) (Atomic End_idx)))
adamc@147 571 (And (Atomic (Right_idx End_idx))
adamc@147 572 (Or (Atomic (Left_idx End_idx)) (Atomic End_idx)))))
adamc@147 573 : forall x y z : nat,
adamc@147 574 x < y /\ y > z \/ y > z /\ x < S y --> y > z /\ (x < y \/ x < S y)
adamc@147 575 ]]
adamc@147 576
adamc@147 577 Our goal contained three distinct atomic formulas, and we see that a three-element [varmap] is generated.
adamc@147 578
adamc@147 579 It can be interesting to observe differences between the level of repetition in proof terms generated by [my_tauto] and [tauto] for especially trivial theorems. *)
adamc@144 580
adamc@144 581 Theorem mt4 : True /\ True /\ True /\ True /\ True /\ True /\ False --> False.
adamc@144 582 my_tauto.
adamc@144 583 Qed.
adamc@144 584
adamc@144 585 Print mt4.
adamc@147 586 (** [[
adamc@147 587
adamc@147 588 mt4 =
adamc@147 589 partialOut
adamc@147 590 (my_tauto (Empty_vm Prop)
adamc@147 591 (Imp
adamc@147 592 (And Truth
adamc@147 593 (And Truth
adamc@147 594 (And Truth (And Truth (And Truth (And Truth Falsehood))))))
adamc@147 595 Falsehood))
adamc@147 596 : True /\ True /\ True /\ True /\ True /\ True /\ False --> False
adamc@147 597 ]] *)
adamc@144 598
adamc@144 599 Theorem mt4' : True /\ True /\ True /\ True /\ True /\ True /\ False -> False.
adamc@144 600 tauto.
adamc@144 601 Qed.
adamc@144 602
adamc@144 603 Print mt4'.
adamc@147 604 (** [[
adamc@147 605
adamc@147 606 mt4' =
adamc@147 607 fun H : True /\ True /\ True /\ True /\ True /\ True /\ False =>
adamc@147 608 and_ind
adamc@147 609 (fun (_ : True) (H1 : True /\ True /\ True /\ True /\ True /\ False) =>
adamc@147 610 and_ind
adamc@147 611 (fun (_ : True) (H3 : True /\ True /\ True /\ True /\ False) =>
adamc@147 612 and_ind
adamc@147 613 (fun (_ : True) (H5 : True /\ True /\ True /\ False) =>
adamc@147 614 and_ind
adamc@147 615 (fun (_ : True) (H7 : True /\ True /\ False) =>
adamc@147 616 and_ind
adamc@147 617 (fun (_ : True) (H9 : True /\ False) =>
adamc@147 618 and_ind (fun (_ : True) (H11 : False) => False_ind False H11)
adamc@147 619 H9) H7) H5) H3) H1) H
adamc@147 620 : True /\ True /\ True /\ True /\ True /\ True /\ False -> False
adamc@147 621 ]] *)
adamc@147 622
adamc@149 623
adamc@149 624 (** * Exercises *)
adamc@149 625
adamc@149 626 (** %\begin{enumerate}%#<ol>#
adamc@149 627
adamc@149 628 %\item%#<li># Implement a reflective procedure for normalizing systems of linear equations over rational numbers. In particular, the tactic should identify all hypotheses that are linear equations over rationals where the equation righthand sides are constants. It should normalize each hypothesis to have a lefthand side that is a sum of products of constants and variables, with no variable appearing multiple times. Then, your tactic should add together all of these equations to form a single new equation, possibly clearing the original equations. Some coefficients may cancel in the addition, reducing the number of variables that appear.
adamc@149 629
adamc@149 630 To work with rational numbers, import module [QArith] and use [Open Local Scope Q_scope]. All of the usual arithmetic operator notations will then work with rationals, and there are shorthands for constants 0 and 1. Other rationals must be written as [num # den] for numerator [num] and denominator [den]. Use the infix operator [==] in place of [=], to deal with different ways of expressing the same number as a fraction. For instance, a theorem and proof like this one should work with your tactic:
adamc@149 631
adamc@149 632 [[
adamc@149 633 Theorem t2 : forall x y z, (2 # 1) * (x - (3 # 2) * y) == 15 # 1
adamc@149 634 -> z + (8 # 1) * x == 20 # 1
adamc@149 635 -> (-6 # 2) * y + (10 # 1) * x + z == 35 # 1.
]]

adamc@149 636 [[
adamc@149 637 
adamc@149 638 intros; reflectContext; assumption.
]]

adamc@149 639 [[
adamc@149 640 Qed.
]]
adamc@149 641
adamc@149 642 Your solution can work in any way that involves reflecting syntax and doing most calculation with a Gallina function. These hints outline a particular possible solution. Throughout, the [ring] tactic will be helpful for proving many simple facts about rationals, and tactics like [rewrite] are correctly overloaded to work with rational equality [==].
adamc@149 643
adamc@149 644 %\begin{enumerate}%#<ol>#
adamc@149 645 %\item%#<li># Define an inductive type [exp] of expressions over rationals (which inhabit the Coq type [Q]). Include variables (represented as natural numbers), constants, addition, subtraction, and multiplication. (A rough starting point for this and the next two items is sketched after this exercise list.)#</li>#
adamc@149 646 %\item%#<li># Define a function [lookup] for reading an element out of a list of rationals, by its position in the list.#</li>#
adamc@149 647 %\item%#<li># Define a function [expDenote] that translates [exp]s, along with lists of rationals representing variable values, to [Q].#</li>#
adamc@149 648 %\item%#<li># Define a recursive function [eqsDenote] over [list (exp * Q)], characterizing when all of the equations are true.#</li>#
adamc@149 649 %\item%#<li># Fix a representation [lhs] of flattened expressions. Where [len] is the number of variables, represent a flattened equation as [ilist Q len]. Each position of the list gives the coefficient of the corresponding variable.#</li>#
adamc@151 650 %\item%#<li># Write a recursive function [linearize] that takes a constant [k] and an expression [e] and optionally returns an [lhs] equivalent to [k * e]. This function returns [None] when it discovers that the input expression is not linear. The parameter [len] of [lhs] should be a parameter of [linearize], too. The functions [singleton], [everywhere], and [map2] from [DepList] will probably be helpful. It is also helpful to know that [Qplus] is the identifier for rational addition.#</li>#
adamc@149 651 %\item%#<li># Write a recursive function [linearizeEqs : list (exp * Q) -> option (lhs * Q)]. This function linearizes all of the equations in the list in turn, building up the sum of the equations. It returns [None] if the linearization of any constituent equation fails.#</li>#
adamc@149 652 %\item%#<li># Define a denotation function for [lhs].#</li>#
adamc@149 653 %\item%#<li># Prove that, when [exp] linearization succeeds on constant [k] and expression [e], the linearized version has the same meaning as [k * e].#</li>#
adamc@149 654 %\item%#<li># Prove that, when [linearizeEqs] succeeds on an equation list [eqs], then the final summed-up equation is true whenever the original equation list is true.#</li>#
adamc@149 655 %\item%#<li># Write a tactic [findVarsHyps] to search through all equalities on rationals in the context, recursing through addition, subtraction, and multiplication to find the list of expressions that should be treated as variables. This list should be suitable as an argument to [expDenote] and [eqsDenote], associating a [Q] value to each natural number that stands for a variable.#</li>#
adamc@149 656 %\item%#<li># Write a tactic [reflect] to reflect a [Q] expression into [exp], with respect to a given list of variable values.#</li>#
adamc@149 657 %\item%#<li># Write a tactic [reflectEqs] to reflect a formula that begins with a sequence of implications from linear equalities whose lefthand sides are expressed with [expDenote]. This tactic should build a [list (exp * Q)] representing the equations. Remember to give an explicit type annotation when returning a nil list, as in [constr:(@nil (exp * Q))].#</li>#
adamc@149 658 %\item%#<li># Now this final tactic should do the job:
adamc@149 659 [[
adamc@149 660 Ltac reflectContext :=
adamc@149 661 let ls := findVarsHyps in
adamc@149 662 repeat match goal with
adamc@149 663 | [ H : ?e == ?num # ?den |- _ ] =>
adamc@149 664 let r := reflect ls e in
adamc@149 665 change (expDenote ls r == num # den) in H;
adamc@149 666 generalize H
adamc@149 667 end;
adamc@149 668 match goal with
adamc@149 669 | [ |- ?g ] => let re := reflectEqs g in
adamc@149 670 intros;
adamc@149 671 let H := fresh "H" in
adamc@149 672 assert (H : eqsDenote ls re); [ simpl in *; tauto
adamc@149 673 | repeat match goal with
adamc@149 674 | [ H : expDenote _ _ == _ |- _ ] => clear H
adamc@149 675 end;
adamc@149 676 generalize (linearizeEqsCorrect ls re H); clear H; simpl;
adamc@149 677 match goal with
adamc@149 678 | [ |- ?X == ?Y -> _ ] =>
adamc@149 679 ring_simplify X Y; intro
adamc@149 680 end ]
adamc@149 681 end.
]]
adamc@149 682 
adamc@149 683 #</ol>#%\end{enumerate}%
adamc@149 684 #</li>#
adamc@149 685
adamc@149 686 #</ol>#%\end{enumerate}% *)
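
(** As an editorial sketch (not part of the exercise statement), the first three steps of the outline above might begin roughly as follows; the constructor names and the use of [nth] with default [0] are our own choices:

[[
Require Import QArith List.
Open Local Scope Q_scope.

Inductive exp : Set :=
| Const : Q -> exp
| Var : nat -> exp
| Plus : exp -> exp -> exp
| Minus : exp -> exp -> exp
| Mult : exp -> exp -> exp.

Definition lookup (vs : list Q) (n : nat) : Q := nth n vs 0.

Fixpoint expDenote (vs : list Q) (e : exp) : Q :=
  match e with
    | Const q => q
    | Var n => lookup vs n
    | Plus e1 e2 => expDenote vs e1 + expDenote vs e2
    | Minus e1 e2 => expDenote vs e1 - expDenote vs e2
    | Mult e1 e2 => expDenote vs e1 * expDenote vs e2
  end.
]] *)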