Commit 7057f379 authored by Adam Chlipala

Prose for domain-theoretic approach

parent 8ed7cf25
@@ -259,3 +259,11 @@
year = {2006},
booktitle = {Proceedings of the 15th European Symposium on Programming}
}
@InBook{WinskelDomains,
  author = {Glynn Winskel},
  title = {The Formal Semantics of Programming Languages},
  year = {1993},
  publisher = {MIT Press},
  chapter = {8}
}
@@ -279,8 +279,15 @@ well_founded_induction
(** * A Non-Termination Monad Inspired by Domain Theory *)
(** The key insights of %\index{domain theory}%domain theory%~\cite{WinskelDomains}% inspire the next approach to modeling non-termination. Domain theory is based on %\emph{%#<i>#information orders#</i>#%}% that relate values representing computation results according to how much information these values convey. For instance, a simple domain might include values %``%#"#the program does not terminate#"#%''% and %``%#"#the program terminates with the answer 5.#"#%''% The former is considered to be an %\emph{%#<i>#approximation#</i>#%}% of the latter, while the latter is %\emph{%#<i>#not#</i>#%}% an approximation of %``%#"#the program terminates with the answer 6.#"#%''% The details of domain theory will not be important in what follows; we merely borrow the notion of an approximation ordering on computation results.
Consider this definition of a type of computations. *)
Section computation.
Variable A : Type.
(** The type [A] describes the result a computation will yield, if it terminates.
We give a rich dependent type to computations themselves: *)
Definition computation :=
{f : nat -> option A
@@ -289,13 +296,22 @@ Section computation.
-> forall (n' : nat), n' >= n
-> f n' = Some v}.
(** A computation is fundamentally a function [f] from an %\emph{%#<i>#approximation level#</i>#%}% [n] to an optional result. Intuitively, higher [n] values enable termination in more cases than lower values. A call to [f] may return [None] to indicate that [n] was not high enough to run the computation to completion; higher [n] values may yield [Some]. Further, the proof obligation within the sigma type asserts that [f] is %\emph{%#<i>#monotone#</i>#%}% in an appropriate sense: when some [n] is sufficient to produce termination, so are all higher [n] values, and they all yield the same program result [v].
It is easy to define a relation characterizing when a computation runs to a particular result at a particular approximation level. *)
Definition runTo (m : computation) (n : nat) (v : A) :=
proj1_sig m n = Some v.
(** On top of [runTo], we also define [run], which is the most abstract notion of when a computation runs to a value. *)
Definition run (m : computation) (v : A) :=
exists n, runTo m n v.
End computation.
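(** As a small concrete illustration (this example is not part of the original source; the names [answer_at_two] and [run_answer_at_two] are made up for exposition, and the proof assumes the standard arithmetic and [Omega] libraries are loaded at the top of the file, as the later hints suggest), here is a computation whose underlying function needs approximation level [2] or higher before it reports an answer. The monotonicity obligation of the sigma type is discharged by a short proof. *)

Definition answer_at_two : computation nat.
  (* The computational part: [None] below level 2, [Some 5] from level 2 on. *)
  exists (fun n => match n with
                     | S (S _) => Some 5
                     | _ => None
                   end).
  (* Monotonicity: once the answer appears, it persists at all higher levels. *)
  abstract (intros n v H n' Hge; destruct n as [ | [ | n]]; try congruence;
    destruct n' as [ | [ | n']]; auto; exfalso; omega).
Defined.

(** Level [2] (or anything above it) suffices to observe the result. *)

Theorem run_answer_at_two : run answer_at_two 5.
  exists 2; reflexivity.
Qed.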
(** The book source code contains at this point some tactics, lemma proofs, and hint commands, to be used in proving facts about computations. Since their details are orthogonal to the message of this chapter, I have omitted them in the rendered version. *)
(* begin hide *)
Hint Unfold runTo.
Ltac run' := unfold run, runTo in *; try red; crush;
@@ -352,6 +368,10 @@ Hint Resolve ge_refl.
Hint Extern 1 => match goal with
| [ H : _ = exist _ _ _ |- _ ] => rewrite H
end.
(* end hide *)
(** remove printing exists *)
(** Now, as a simple first example of a computation, we can define [Bottom], which corresponds to an infinite loop. For any approximation level, it fails to terminate (returns [None]). Note the use of [abstract] to create a new opaque lemma for the proof found by the [run] tactic. In contrast to the previous section, opaque proofs are fine here, since the proof components of computations do not influence evaluation behavior. *)
Section Bottom.
Variable A : Type.
@@ -365,6 +385,8 @@ Section Bottom.
Qed.
End Bottom.
(** A slightly more complicated example is [Return], which gives the same terminating answer at every approximation level. *)
Section Return.
Variable A : Type.
Variable v : A.
@@ -376,13 +398,9 @@ Section Return.
Theorem run_Return : run Return v.
run.
Qed.
Theorem run_Return_inv : forall x, run Return x -> x = v.
run.
Qed.
End Return.
Hint Resolve run_Return.
(** The name [Return] was meant to be suggestive of the standard operations of monads%~\cite{Monads}%. The other standard operation is [Bind], which lets us run one computation and, if it terminates, pass its result off to another computation. *)
Section Bind.
Variables A B : Type.
@@ -400,8 +418,6 @@ Section Bind.
end); abstract run.
Defined.
Require Import Max.
Theorem run_Bind : forall (v1 : A) (v2 : B),
run m1 v1
-> run (m2 v1) v2
@@ -410,21 +426,15 @@
| [ x : nat, y : nat |- _ ] => exists (max x y)
end; run.
Qed.
Theorem run_Bind_inv : forall (v2 : B),
run Bind v2
-> exists v1 : A,
run m1 v1
/\ run (m2 v1) v2.
run.
Qed.
End Bind.
Hint Resolve run_Bind.
(** A simple notation lets us write [Bind] calls the way they appear in Haskell. *)
Notation "x <- m1 ; m2" :=
(Bind m1 (fun x => m2)) (right associativity, at level 70).
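(** For instance (an illustrative definition, not part of the original source; the name [increment] is made up), a computation that increments the result of another computation reads naturally with the notation. It elaborates to [Bind c (fun n => Return (S n))]. *)

Definition increment (c : computation nat) : computation nat :=
  n <- c;
  Return (S n).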
(** We can verify that we have indeed defined a monad, by proving the standard monad laws. Part of the exercise is choosing an appropriate notion of equality between computations. We use %``%#"#equality at all approximation levels.#"#%''% *)
Definition meq A (m1 m2 : computation A) := forall n, proj1_sig m1 n = proj1_sig m2 n.
Theorem left_identity : forall A B (a : A) (f : A -> computation B),
@@ -442,20 +452,7 @@ Theorem associativity : forall A B C (m : computation A) (f : A -> computation B
run.
Qed.
Section monotone_runTo.
Variable A : Type.
Variable c : computation A.
Variable v : A.
Theorem monotone_runTo : forall (n1 : nat),
runTo c n1 v
-> forall n2, n2 >= n1
-> runTo c n2 v.
run.
Qed.
End monotone_runTo.
Hint Resolve monotone_runTo.
(** Now we come to the piece most directly inspired by domain theory. We want to support general recursive function definitions, but domain theory tells us that not all definitions are reasonable; some fail to be %\emph{%#<i>#continuous#</i>#%}% and thus represent unrealizable computations. To formalize an analogous notion of continuity for our non-termination monad, we write down the approximation relation on computation results that we have had in mind all along. *)
Section lattice.
Variable A : Type.
@@ -464,24 +461,35 @@ Section lattice.
forall v, x = Some v -> y = Some v.
End lattice.
Hint Unfold leq.
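(** As a quick sanity check (an illustrative lemma, not part of the original source), [None], the result of divergence, approximates every result under [leq], which is what makes it the bottom element of this information order. *)

Lemma leq_None_any : forall A (y : option A), leq None y.
  (* [leq None y] unfolds to an implication from the contradictory [None = Some v]. *)
  intros A y v H; discriminate H.
Qed.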
(** We now have the tools we need to define a new [Fix] combinator that, unlike the one we saw in the prior section, does not require a termination proof, and in fact admits recursive definition of functions that fail to terminate on some or all inputs. *)
Section Fix.
(** First, we have the function domain and range types. *)
Variables A B : Type.
(** Next comes the function body, which is written as though it can be parameterized over itself, for recursive calls. *)
Variable f : (A -> computation B) -> (A -> computation B).
(** Finally, we impose an obligation to prove that the body [f] is continuous. That is, when [f] terminates according to one recursive version of itself, it also terminates with the same result at the same approximation level when passed a recursive version that refines the original, according to [leq]. *)
Hypothesis f_continuous : forall n v v1 x,
runTo (f v1 x) n v
-> forall (v2 : A -> computation B),
(forall x, leq (proj1_sig (v1 x) n) (proj1_sig (v2 x) n))
-> runTo (f v2 x) n v.
(** The computational part of the [Fix] combinator is easy to define. At approximation level 0, we diverge; at higher levels, we run the body with a functional argument drawn from the next lower level. *)
Fixpoint Fix' (n : nat) (x : A) : computation B :=
match n with
| O => Bottom _
| S n' => f (Fix' n') x
end.
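(** To see how the levels stack up (these two check lemmas are illustrative additions, not in the original source), level [0] is just [Bottom], and each successor level plugs the previous level in for the recursive calls, so level [n] permits at most [n] nested uses of [f]. Both equations hold by computation alone. *)

Example Fix'_level_O : forall x, Fix' O x = Bottom B.
  intros; reflexivity.
Qed.

Example Fix'_level_S : forall n x, Fix' (S n) x = f (Fix' n) x.
  intros; reflexivity.
Qed.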
(** Now it is straightforward to package [Fix'] as a computation combinator [Fix]. *)
Hint Extern 1 (_ >= _) => omega.
Hint Unfold leq.
@@ -505,12 +513,7 @@ Section Fix.
intro x; exists (fun n => proj1_sig (Fix' n x) n); abstract run.
Defined.
Definition extensional (f : (A -> computation B) -> (A -> computation B)) :=
forall g1 g2 n,
(forall x, proj1_sig (g1 x) n = proj1_sig (g2 x) n)
-> forall x, proj1_sig (f g1 x) n = proj1_sig (f g2 x) n.
Hypothesis f_extensional : extensional f.
(** Finally, we can prove that [Fix] obeys the expected computation rule. *)
Theorem run_Fix : forall x v,
run (f Fix x) v
@@ -521,8 +524,7 @@ Section Fix.
Qed.
End Fix.
Hint Resolve run_Fix.
(* begin hide *)
Lemma leq_Some : forall A (x y : A), leq (Some x) (Some y)
-> x = y.
intros ? ? ? H; generalize (H _ (refl_equal _)); crush.
@@ -533,16 +535,7 @@ Lemma leq_None : forall A (x y : A), leq (Some x) None
intros ? ? ? H; generalize (H _ (refl_equal _)); crush.
Qed.
Definition mergeSort' : forall A, (A -> A -> bool) -> list A -> computation (list A).
refine (fun A le => Fix
(fun (mergeSort : list A -> computation (list A))
(ls : list A) =>
if le_lt_dec 2 (length ls)
then let lss := partition ls in
ls1 <- mergeSort (fst lss);
ls2 <- mergeSort (snd lss);
Return (merge le ls1 ls2)
else Return ls) _); abstract (run;
Ltac mergeSort' := run;
repeat (match goal with
| [ |- context[match ?E with O => _ | S _ => _ end] ] => destruct E
end; run);
@@ -554,30 +547,53 @@ Definition mergeSort' : forall A, (A -> A -> bool) -> list A -> computation (lis
end
end; run; repeat match goal with
| [ H : _ |- _ ] => (apply leq_None in H; tauto) || (apply leq_Some in H; subst)
end; auto).
end; auto.
(* end hide *)
(** After all that work, it is now fairly painless to define a version of [mergeSort] that requires no proof of termination. We appeal to a program-specific tactic whose definition is hidden here but present in the book source. *)
Definition mergeSort' : forall A, (A -> A -> bool) -> list A -> computation (list A).
refine (fun A le => Fix
(fun (mergeSort : list A -> computation (list A))
(ls : list A) =>
if le_lt_dec 2 (length ls)
then let lss := partition ls in
ls1 <- mergeSort (fst lss);
ls2 <- mergeSort (snd lss);
Return (merge le ls1 ls2)
else Return ls) _); abstract mergeSort'.
Defined.
Print mergeSort'.
(** Furthermore, %``%#"#running#"#%''% [mergeSort'] on concrete inputs is as easy as choosing a sufficiently high approximation level and letting Coq's computation rules do the rest. Contrast this with the proof work that goes into deriving an evaluation fact for a deeply embedded language, with one explicit proof rule application per execution step. *)
Lemma test_mergeSort' : run (mergeSort' leb (1 :: 2 :: 36 :: 8 :: 19 :: nil))
(1 :: 2 :: 8 :: 19 :: 36 :: nil).
exists 4; reflexivity.
Qed.
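(** For contrast (an illustrative check, not from the original source), too low an approximation level leaves the recursive calls stuck at [Bottom], so no answer is produced yet; this [Eval] should reduce to [None], in contrast with level [4] above. *)

Eval compute in proj1_sig (mergeSort' leb (1 :: 2 :: 36 :: 8 :: 19 :: nil)) 1.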
Definition looper : bool -> computation unit.
refine (Fix (fun looper (b : bool) =>
if b then Return tt else looper b) _);
abstract (unfold leq in *; run;
(** There is another benefit of our new [Fix] compared with the one we used in the previous section: we can now write recursive functions that sometimes fail to terminate, without losing easy reasoning principles for the terminating cases. Consider this simple example, which appeals to another tactic whose definition we elide here. *)
(* begin hide *)
Ltac looper := unfold leq in *; run;
repeat match goal with
| [ x : unit |- _ ] => destruct x
| [ x : bool |- _ ] => destruct x
end; auto).
end; auto.
(* end hide *)
Definition looper : bool -> computation unit.
refine (Fix (fun looper (b : bool) =>
if b then Return tt else looper b) _); abstract looper.
Defined.
Lemma test_looper : run (looper true) tt.
exists 1; reflexivity.
Qed.
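(** In the other direction (again an illustrative check, not from the original source), [looper false] never produces an answer; raising the approximation level only unrolls more calls that bottom out, so this [Eval] should also reduce to [None]. *)

Eval compute in proj1_sig (looper false) 10.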
(** As before, proving outputs for specific inputs is as easy as demonstrating a high enough approximation level.
There are other theorems that are important to prove about combinators like [Return], [Bind], and [Fix]. In general, for a computation [c], we sometimes have a hypothesis proving [run c v] for some [v], and we want to perform inversion to deduce what [v] must be. Each combinator should ideally have a theorem of that kind, for [c] built directly from that combinator. We have omitted such theorems here, but they are not hard to prove. In general, the domain theory-inspired approach avoids the type-theoretic %``%#"#gotchas#"#%''% that tend to show up in approaches that try to mix normal Coq computation with explicit syntax types. The next section of this chapter demonstrates two alternate approaches of that sort. *)
(** * Co-Inductive Non-Termination Monads *)
@@ -665,6 +681,8 @@ Qed.
Hint Rewrite TBind_Answer : cpdt.
(** printing exists $\exists$ *)
Theorem tassociativity : forall A B C (m : thunk A) (f : A -> thunk B) (g : B -> thunk C),
thunk_eq (TBind (TBind m f) g) (TBind m (fun x => TBind (f x) g)).
intros; apply (thunk_eq_coind (fun m1 m2 => (exists m,
@@ -836,6 +854,7 @@ Fixpoint map A B (f : A -> computation B) (ls : list A) : computation (list B) :
Return (x' :: ls'')))
end.
(** remove printing exists *)
Theorem test_map : run (map (fun x => Return (S x)) (1 :: 2 :: 3 :: nil)) (2 :: 3 :: 4 :: nil).
exists 1; reflexivity.
Qed.