Hello world

This commit is contained in:
Frederik Hanghøj Iversen 2020-04-19 16:45:00 +02:00
commit e6fdf8060d
22 changed files with 4641 additions and 0 deletions

4
.gitignore vendored Normal file
View file

@ -0,0 +1,4 @@
*.bak
/src/Language/Lang/TestGrammer
src/**/*.hi
src/**/*.o

13
Makefile Normal file
View file

@ -0,0 +1,13 @@
# Generate the parser sources (and a sub-Makefile) from the grammar
# using BNFC, placing modules under Language.Lang in src/.
src/Makefile: grammer.cf
	bnfc $< -p Language.Lang -o src -m

# Build the test parser binary via the BNFC-generated Makefile.
# Use $(MAKE) (not plain `make`) so flags propagate to the sub-make.
src/Language/Lang/TestGrammer: ./src/Makefile
	cd src && $(MAKE) all

.PHONY: install parse

# Install the parser binary into the user's local bin as `lang-parse`.
install: src/Language/Lang/TestGrammer
	install $< ~/.local/bin/lang-parse

# Smoke-test: parse the small example program.
parse: install
	lang-parse lang/small.lang

17
backlog.org Normal file
View file

@ -0,0 +1,17 @@
* Indentation-aware parser
Figure out if `bnfc` can support the idea about block-scoping
according to non-decreasing indentation levels. Examples:
-- This is a
comment.
id
= x
-> x
id 2
Here the "--" in column 0 starts a comment block which ends the next
time we write something in column <= 0. Which we do in column 4.
Similarly the binding that starts at row 4 (with the introduction of
an identifier) ends in row 8.

244
grammer.cf Normal file
View file

@ -0,0 +1,244 @@
-- programs
entrypoints Module;

Module . Module ::= [Binding] ;

BindingDeclaration . Binding ::= Declaration;
BindingDefinition . Binding ::= Definition;
BindingPragma . Binding ::= "{#" Pragma "#}";
-- BindingComment . Binding ::= Comment;

DeclarationNamed . Declaration ::= Name ":" Type;
DeclarationAnonymous . Declaration ::= Type;

DefinitionNamed . Definition ::= Name "=" Expression;
DefinitionAnonymous . Definition ::= Expression;

TypeName . Type ::= Name;
TypeApplication . Type ::= Type Type;
TypeAbstraction . Type ::= Type "->" Type;
TypeImplicit . Type ::= "{{" [Declaration] "}}";
TypeRecord . Type ::= "{" [Declaration] "}";
TypeAlternative . Type ::= "[" [Declaration] "]";
TypeParens . Type ::= "(" Type ")";

-- TODO: Do I want wildcards to be stored separately in the AST from
-- non-wildcards? Wildcards are identifiers that start with a
-- lower-case letter.
token Name ((letter | '_' | '\'') (letter | digit | '.' | '-' | '_' | '\'')*);

-- Perhaps not the best rule. Pragmas are basically comments, but I
-- *do* want them to appear in the AST.
Pragma . Pragma ::= [ Name ];

-- TODO: We are using separators because we cannot simply use newlines
-- as separators because it would disallow breaking up bindings across
-- multiple lines. What we really want is some sort of non-increasing
-- indentation level support. So that can correctly parse e.g.:
-- id
-- = x
-- -> x
separator Binding ",";
separator Declaration ",";
separator Definition ",";
separator nonempty Type ",";
separator nonempty Name " ";

-- TODO: Does not support my idea of `--` marking the beginning of a
-- comment *block* which is terminated when the level of indentation
-- matches the comment start marker.
-- Comment . Comment ::= "--" [Name];
-- layout "--";
-- layout "->";
comment "--";
comment "{-" "-}";

-- Only difference from types is that expression do not contain
-- declarations but only definition
ExpressionName . Expression ::= Name;
ExpressionLiteral . Expression ::= NumericLiteral;
ExpressionApplication . Expression ::= Expression Expression;
ExpressionAbstraction . Expression ::= Expression "->" Expression;
ExpressionImplicit . Expression ::= "{{" Module "}}";
ExpressionRecord . Expression ::= "{" Module "}";
ExpressionAlternative . Expression ::= "[" Module "]";
ExpressionParens . Expression ::= "(" Expression ")";

-- FIX: this rule previously lacked the terminating ";" that every
-- LBNF definition requires.
token NumericLiteral ((digit+ '.'? digit*) | (digit* '.'? digit+) ('e' digit+)?);
-- -- programs ------------------------------------------------
-- entrypoints Prog ;
-- Program. Prog ::= [TopDef] ;
-- FnDef. TopDef ::= Type Ident "(" [Arg] ")" Blk ;
-- separator nonempty TopDef "" ;
-- Argument. Arg ::= Type Ident;
-- separator Arg "," ;
-- -- statements ----------------------------------------------
-- Block. Blk ::= "{" [Stmt] "}" ;
-- separator Stmt "" ;
-- Empty. Stmt ::= ";" ;
-- BStmt. Stmt ::= Blk ;
-- Decl. Stmt ::= Type [Item] ";" ;
-- NoInit. Item ::= Ident ;
-- Init. Item ::= Ident "=" Expr ;
-- InitObj. Item ::= Ident "=" "new" Constructor ;
-- separator nonempty Item "," ;
-- Ass. Stmt ::= Expr5 "=" Expr ";" ;
-- Incr. Stmt ::= Ident "++" ";" ;
-- Decr. Stmt ::= Ident "--" ";" ;
-- Ret. Stmt ::= "return" Expr ";" ;
-- VRet. Stmt ::= "return" ";" ;
-- Cond. Stmt ::= "if" "(" Expr ")" Stmt ;
-- CondElse. Stmt ::= "if" "(" Expr ")" Stmt "else" Stmt ;
-- While. Stmt ::= "while" "(" Expr ")" Stmt ;
-- For. Stmt ::= "for" "(" Type Ident ":" Expr ")" Stmt ;
-- SExp. Stmt ::= Expr ";" ;
-- TypeCon. Constructor ::= Type ;
-- ArrayCon. Constructor ::= Constructor "[" Expr "]" ;
-- Indx. Index ::= "[" Expr "]" ;
-- -- Types ---------------------------------------------------
-- Int. Type ::= "int" ;
-- Doub. Type ::= "double" ;
-- Bool. Type ::= "boolean" ;
-- Void. Type ::= "void" ;
-- Array. Type ::= Type "[]" ;
-- internal String. Type ::= "string" ;
-- internal Fun. Type ::= Type "(" [Type] ")" ;
-- separator Type "," ;
-- -- Expressions ---------------------------------------------
-- EVar. Expr6 ::= Ident ;
-- ELitInt. Expr6 ::= Integer ;
-- ELitDoub. Expr6 ::= Double;
-- ELitTrue. Expr6 ::= "true" ;
-- ELitFalse. Expr6 ::= "false" ;
-- EApp. Expr6 ::= Ident "(" [Expr] ")" ;
-- EString. Expr6 ::= String ;
-- Dot. Expr6 ::= Expr6 "." Ident ;
-- EIndex. Expr6 ::= Expr6 Index;
-- Neg. Expr5 ::= "-" Expr6 ;
-- Not. Expr5 ::= "!" Expr6 ;
-- EMul. Expr4 ::= Expr4 MulOp Expr5 ;
-- EAdd. Expr3 ::= Expr3 AddOp Expr4 ;
-- ERel. Expr2 ::= Expr2 RelOp Expr3 ;
-- EAnd. Expr1 ::= Expr2 "&&" Expr1 ;
-- EOr. Expr ::= Expr1 "||" Expr ;
-- internal EAnn. Expr ::= "{-" Type "-}" Expr ;
-- coercions Expr 6 ;
-- separator Expr "," ;
-- -- operators -----------------------------------------------
-- Plus. AddOp ::= "+" ;
-- Minus. AddOp ::= "-" ;
-- Times. MulOp ::= "*" ;
-- Div. MulOp ::= "/" ;
-- Mod. MulOp ::= "%" ;
-- LTH. RelOp ::= "<" ;
-- LE. RelOp ::= "<=" ;
-- GTH. RelOp ::= ">" ;
-- GE. RelOp ::= ">=" ;
-- EQU. RelOp ::= "==" ;
-- NE. RelOp ::= "!=" ;
-- -- comments ------------------------------------------------
-- comment "#" ;
-- comment "//" ;
-- comment "/*" "*/" ;

2
lang/ambig.lang Normal file
View file

@ -0,0 +1,2 @@
-- NOTE(review): presumably a minimal test for the grammar's
-- record-syntax ambiguity — the same `{ ... }` braces appear in both
-- type position (after ":") and expression position (after "=");
-- confirm against the parser's conflict report.
_ : { _ : {} },
_ = { _ : {} },

164
lang/example.lang Normal file
View file

@ -0,0 +1,164 @@
-- NOTE(review): design sketch / scratch file for the language's
-- surface syntax. Several spans below are prose continuations without
-- a "--" marker (relying on the planned indentation-comment rule
-- discussed near the end), so this file is not expected to parse
-- under the current grammer.cf.
-- The empty type is the empty alternative.
Bot : Type
Bot = []
-- The unit type is the empty record.
Top : Type
Top = {}
t : Top
-- Names of literals can be inferred.
t = {} @Top
t = {}
Cop : Type
Cop = {}
c : Cop
c = {}
-- Renaming
c : Cop
c = t @Top
-- Dropping names
k : {}
k : t @_
{} : {}
Bool : Type
Bool = [ false , true ]
f : Bool
f = false
t : Bool
t = true
-- data Maybe a = Nothing | Just a
-- data Maybe : Type -> Type where
-- Nothing : Maybe a
-- Just : a -> Maybe a
Maybe : Type -> Type
-- Types can be omitted when they can be inferred:
Maybe A = [ nothing : Maybe A , just A : Maybe A ]
Maybe A = [ nothing , just A ]
n : Maybe A
n = nothing
j : Maybe Top
j = just {}
Either : Type -> Type -> Type
Either A B = [ left A , right B ]
Nat : Type
Nat = [ zero : Nat , succ : Nat -> Nat ]
two : Nat
two = succ (succ zero)
Pair : Type -> Type -> Type
-- NOTE(review): missing "=" here? cf. "Pair A B = { A , B }" below.
Pair A B { fst : A , snd : B }
-- Without naming the projections
Pair A B = { A , B }
p : Pair A B
p = { }
add : N -> N -> N
add n m {
n + m
}
mult : N -> N -> N
mult n m {
n * m
}
-- Modules are regular records. This is similar to how records work
-- in Agda. In other words, this module (record) contains two
-- elements. The elements of the record are themself functions that
-- can be exposed by "opening" Bin
Arith : Type
Arith = {
add :
}
Bin : N -> N -> Type
-- NOTE(review): missing opening "{" after "="? the block below is
-- closed by a lone "}".
Bin n m =
add : N
add = n + m,
mult : N
mult = n * m
}
{} : {}
[] : []
-- {} is used to *introduce* records/products
-- pair.fst is used to eliminate records/products
pr : Pair Top Cop
pr = { {}, {} }
pr = { fst = {}, snd = {} }
fst : Pair A B -> A
fst { fst } = fst
-- [] is used to eliminate alternatives/sums
either : (a -> c) -> (b -> c) -> Either a b -> c
either l r e = [ left a = l a, right b = r b ] e
-- Syntax for case-statements makes -XLambdaCase redundant.
either l r = [ left a = l a, right b = r b ]
-- Tags could in theory be omitted just like they can for {}-intro:
either l r = [ l, r ]
-- Perhaps the meaning will be rather difficult to decipher though,
also it would mean that the order in which summands are defined
become significant which might not be desirable
-- Note the duality:
{ fst = {}, snd = {} } is an introduction.
[ left a = l a, right b = r b ] is an elimination.
-- Comments could follow the non-decreasing indentation rule as well.
So that multi-line comments can use the same syntax as single-line
comments. Just that the comments on following lines must be
indented more than where the comment marker is located. In this
example the comment marker is at column 0.
Semigroup : Type -> Class
semigroup a = {
append : a -> a -> a
}
Monoid : Type -> Class
Monoid a = {
{_ Semigroup a _}, -- Curly braces in type signature signal implicit
arguments.
-- Could also be written:
{_ semigroup : Semigroup a _} -- I.e. the braces extend around the
field name as well.
empty : a
}
Functor : (Type -> Type) -> Class
Functor f = {
map : (a -> b) -> f a -> f b
}
{ empty = Nothing } : {Semigroup a} -> Monoid (Maybe a)
{ Nothing } : {Semigroup a} -> Monoid (Maybe a)
@{T} : [| T |] -> T
@[T] : T -> [| T |]
T@

177
lang/prim.lang Normal file
View file

@ -0,0 +1,177 @@
-- NOTE(review): sketch of a primitive "prelude" for the language,
-- using the {{ }} implicit/class-record syntax and {# ... #} pragmas
-- from grammer.cf. Bindings here are newline-separated, so this file
-- presumably does not parse with the current comma-separator grammar
-- (lang/small.lang is the comma-separated variant) — confirm.
{# type Type #}
{# class Class #}
-- Classes
Semigroup : Type -> Class
Semigroup a = {{
append : a -> a -> a
}}
Monoid : Type -> Class
Monoid a = {{
semigroup : {{ Semigroup a }}
empty : a
}}
-- Types
Void : Type
Void = {
Void : Type
Void = []
semigroup : {{ Semigroup Void }}
semigroup = {{
append = []
}}
}
Unit : Type
Unit = {
Unit : Type
Unit = {}
semigroup : {{ Semigroup Unit }}
semigroup = {{
append _ _ = {}
}}
monoid : {{ Monoid Unit }}
monoid = {{
empty = {}
}}
}
{# prim bool Bool #}
Bool : Type
Bool = {
Bool : Type
Bool = [false : {}, true : {}]
Any : Type
Any = {
semigroup : {{ Semigroup Bool }}
semigroup = {{
append a = [false = a, true = true]
}}
monoid : {{ Monoid Bool }}
monoid = {{ empty = false }}
}
All : Type
All = {
semigroup : {{ Semigroup Bool }}
semigroup = {{
append a = [false = false, true = a]
}}
monoid : {{ Monoid Bool }}
monoid = {{ empty = true }}
}
}
Int : Type
Int = {
{# prim i64 Int #}
Int : Type
-- NOTE(review): "succ : Nat" mentions Nat inside the Int definition —
-- looks copied from a Nat definition; confirm intent.
Int = [zero : {}, succ : Nat ]
Additive : Type
Additive = {
semigroup : {{ Semigroup Int }}
semigroup = {{
append n = [zero = n, succ m = succ (append n m) ]
}}
monoid : {{ Monoid Int }}
monoid = {{
empty = zero
}}
}
Multiplicative : Type
Multiplicative = {
semigroup : {{ Semigroup Int }}
semigroup = {{
append n = [zero = zero, succ m = Additive.append n (append n m) ]
}}
monoid : {{ Monoid Int }}
monoid = {{
empty = succ zero
}}
}
}
Pair : Type
Pair = {
Pair : Type -> Type -> Type
Pair a b = { left : a, right : b }
semigroup : {{ Semigroup A }} -> {{ Semigroup B }} -> {{ Semigroup (Pair A B) }}
semigroup = {{
append { a0, b0 } { a1, b1 } = { append a0 a1, append b0 b1 }
}}
monoid : {{ Monoid A }} -> {{ Monoid B }} -> {{ Monoid (Pair A B) }}
monoid = {{
empty = { empty, empty }
}}
}
Either : Type
Either = {
Either : Type -> Type -> Type
Either a b = [ left : a, right : b ]
Left : Type
Left = {
semigroup : {{ Semigroup (Either A B) }}
semigroup = {{
append = (l _ = l)
}}
}
Right : Type
Right = {
semigroup : {{ Semigroup (Either A B) }}
semigroup = {{
append = (_ r = r)
}}
}
}
List : Type
List = {
List : Type -> Type
List a = [ nil : {} , cons : { head : A , tail : List a } ]
semigroup : {{ Semigroup (List a) }}
semigroup = {{
append = [
nil b = b
(cons a as) b = cons a (append as b)
]
}}
monoid : {{ Monoid (List a) }}
monoid = {{
empty = nil
}}
}
{# prim char Char #}
Char : Type
{# prim string String #}
String : Type
String = List Char
{# builtin show Show #}
Show : Type -> Class
Show a = {{
show : a -> String
}}

1
lang/rec.lang Normal file
View file

@ -0,0 +1 @@
Void = {a : a}

179
lang/small.lang Normal file
View file

@ -0,0 +1,179 @@
-- NOTE(review): comma-separated variant of lang/prim.lang, written
-- for the current separator-based grammar (the Makefile's `parse`
-- target runs lang-parse on this file). Abstractions use the
-- explicit "a -> ..." form. Several blocks end with a trailing ","
-- before a closing brace — confirm the separator rules accept that.
{# type Type #} ,
{# class Class #} ,
-- Classes
Semigroup : Type -> Class,
Semigroup = a -> {{
append : a -> a -> a
}},
Monoid : Type -> Class,
Monoid = a -> {{
semigroup : {{ Semigroup a }},
empty : a
}},
-- Types
Void : Type,
Void = {
Void : Type,
-- NOTE(review): lang/prim.lang has "Void = []" here; the bare "a"
-- may be a typo — confirm.
Void = a,
semigroup : {{ Semigroup Void }},
semigroup = {{
append = []
}},
},
Unit : Type,
Unit = {
Unit : Type,
Unit = {},
semigroup : {{ Semigroup Unit }},
semigroup = {{
append = _ _ -> {},
}},
monoid : {{ Monoid Unit }},
monoid = {{
empty = {},
}},
},
{# prim bool Bool #},
Bool : Type,
Bool = {
Bool : Type,
Bool = [false : {}, true : {}],
Any : Type,
Any = {
semigroup : {{ Semigroup Bool }},
semigroup = {{
append = a -> [false = a, true = true],
}},
monoid : {{ Monoid Bool }},
monoid = {{ empty = false }},
},
All : Type,
All = {
semigroup : {{ Semigroup Bool }},
semigroup = {{
append = a -> [false = false, true = a],
}},
monoid : {{ Monoid Bool }},
monoid = {{ empty = true }},
},
},
Int : Type,
Int = {
{# prim i64 Int #},
Int : Type,
Int = [zero : {}, succ : Nat ],
Additive : Type,
Additive = {
semigroup : {{ Semigroup Int }},
semigroup = {{
append = n -> [zero = n, succ = m -> succ (append n m) ],
}},
monoid : {{ Monoid Int }},
monoid = {{
empty = zero,
}},
},
Multiplicative : Type,
Multiplicative = {
semigroup : {{ Semigroup Int }},
semigroup = {{
append = n -> [zero = zero, succ = m -> Additive.append n (append n m) ],
}},
monoid : {{ Monoid Int }},
monoid = {{
empty = succ zero,
}},
},
},
Pair : Type,
Pair = {
Pair : Type -> Type -> Type,
Pair = a -> b -> { left : a, right : b },
semigroup : {{ Semigroup A }} -> {{ Semigroup B }} -> {{ Semigroup (Pair A B) }},
semigroup = {{
append = { a0, b0 } { a1, b1 } -> { append a0 a1, append b0 b1 }
}},
monoid : {{ Monoid A }} -> {{ Monoid B }} -> {{ Monoid (Pair A B) }},
monoid = {{
empty = { empty, empty },
}},
},
Either : Type,
Either = {
Either : Type -> Type -> Type,
Either = a b -> [ left : a, right : b ],
Left : Type,
Left = {
semigroup : {{ Semigroup (Either A B) }},
semigroup = {{
append = l _ -> l,
}},
},
Right : Type,
Right = {
semigroup : {{ Semigroup (Either A B) }},
semigroup = {{
append = _ r -> r
}},
},
},
List : Type,
List = {
List : Type -> Type,
List = a -> [ nil : {} , cons : { head : A , tail : List a } ],
semigroup : {{ Semigroup (List a) }},
semigroup = {{
append = [
nil = {} -> b -> b,
cons = {a, as} -> b -> cons a (append as b),
],
}},
monoid : {{ Monoid (List a) }},
monoid = {{
empty = nil,
}},
},
{# prim char Char #},
Char : Type,
{# prim string String #},
String : Type,
String = List Char,
{# builtin show Show #},
Show : Type -> Class,
Show = a -> {{
show : a -> String,
}},

View file

@ -0,0 +1,58 @@
-- Haskell data types for the abstract syntax.
-- Generated by the BNF converter.

{-# LANGUAGE GeneralizedNewtypeDeriving #-}

module Language.Lang.AbsGrammer where

import Prelude (Char, Double, Integer, String)
import qualified Prelude as C (Eq, Ord, Show, Read)
import qualified Data.String

-- | Identifier token (the @token Name@ rule of the grammar).
newtype Name = Name String
  deriving (C.Eq, C.Ord, C.Show, C.Read, Data.String.IsString)

-- | Numeric literal token, stored as its source text.
newtype NumericLiteral = NumericLiteral String
  deriving (C.Eq, C.Ord, C.Show, C.Read, Data.String.IsString)

-- | A module is a sequence of bindings.
data Module = Module [Binding]
  deriving (C.Eq, C.Ord, C.Show, C.Read)

-- | A binding: a declaration, a definition, or a @{# ... #}@ pragma.
data Binding
    = BindingDeclaration Declaration
    | BindingDefinition Definition
    | BindingPragma Pragma
  deriving (C.Eq, C.Ord, C.Show, C.Read)

-- | @name : type@, or an anonymous bare type.
data Declaration
    = DeclarationNamed Name Type | DeclarationAnonymous Type
  deriving (C.Eq, C.Ord, C.Show, C.Read)

-- | @name = expression@, or an anonymous bare expression.
data Definition
    = DefinitionNamed Name Expression | DefinitionAnonymous Expression
  deriving (C.Eq, C.Ord, C.Show, C.Read)

-- | Types: names, application, arrows, and the bracketed declaration
-- lists — @{{ }}@ implicit, @{ }@ record, @[ ]@ alternative.
data Type
    = TypeName Name
    | TypeApplication Type Type
    | TypeAbstraction Type Type
    | TypeImplicit [Declaration]
    | TypeRecord [Declaration]
    | TypeAlternative [Declaration]
    | TypeParens Type
  deriving (C.Eq, C.Ord, C.Show, C.Read)

-- | A pragma body is a list of names.
data Pragma = Pragma [Name]
  deriving (C.Eq, C.Ord, C.Show, C.Read)

-- | Expressions mirror 'Type', but the bracketed forms contain a
-- 'Module' (i.e. bindings) rather than declarations.
data Expression
    = ExpressionName Name
    | ExpressionLiteral NumericLiteral
    | ExpressionApplication Expression Expression
    | ExpressionAbstraction Expression Expression
    | ExpressionImplicit Module
    | ExpressionRecord Module
    | ExpressionAlternative Module
    | ExpressionParens Expression
  deriving (C.Eq, C.Ord, C.Show, C.Read)

View file

@ -0,0 +1,85 @@
The Language grammer
BNF Converter
%This txt2tags file is machine-generated by the BNF-converter
%Process by txt2tags to generate html or latex
This document was automatically generated by the //BNF-Converter//. It was generated together with the lexer, the parser, and the abstract syntax module, which guarantees that the document matches with the implementation of the language (provided no hand-hacking has taken place).
==The lexical structure of grammer==
===Literals===
Name literals are recognized by the regular expression
`````(["'_"] | letter) (["'-._"] | digit | letter)*`````
NumericLiteral literals are recognized by the regular expression
`````digit+ '.'? digit* | digit* '.'? digit+ ('e' digit+)?`````
===Reserved words and symbols===
The set of reserved words is the set of terminals appearing in the grammar. Those reserved words that consist of non-letter characters are called symbols, and they are treated in a different way from those that are similar to identifiers. The lexer follows rules familiar from languages like Haskell, C, and Java, including longest match and spacing conventions.
The reserved words used in grammer are the following:
There are no reserved words in grammer.
The symbols used in grammer are the following:
| {# | #} | : | =
| -> | {{ | }} | {
| } | [ | ] | (
| ) | , | |
===Comments===
Single-line comments begin with --. Multiple-line comments are enclosed with {- and -}.
==The syntactic structure of grammer==
Non-terminals are enclosed between < and >.
The symbols -> (production), **|** (union)
and **eps** (empty rule) belong to the BNF notation.
All other symbols are terminals.
| //Module// | -> | //[Binding]//
| //Binding// | -> | //Declaration//
| | **|** | //Definition//
| | **|** | ``{#`` //Pragma// ``#}``
| //Declaration// | -> | //Name// ``:`` //Type//
| | **|** | //Type//
| //Definition// | -> | //Name// ``=`` //Expression//
| | **|** | //Expression//
| //Type// | -> | //Name//
| | **|** | //Type// //Type//
| | **|** | //Type// ``->`` //Type//
| | **|** | ``{{`` //[Declaration]// ``}}``
| | **|** | ``{`` //[Declaration]// ``}``
| | **|** | ``[`` //[Declaration]// ``]``
| | **|** | ``(`` //Type// ``)``
| //Pragma// | -> | //[Name]//
| //[Binding]// | -> | **eps**
| | **|** | //Binding//
| | **|** | //Binding// ``,`` //[Binding]//
| //[Declaration]// | -> | **eps**
| | **|** | //Declaration//
| | **|** | //Declaration// ``,`` //[Declaration]//
| //[Definition]// | -> | **eps**
| | **|** | //Definition//
| | **|** | //Definition// ``,`` //[Definition]//
| //[Type]// | -> | //Type//
| | **|** | //Type// ``,`` //[Type]//
| //[Name]// | -> | //Name//
| | **|** | //Name// //[Name]//
| //Expression// | -> | //Name//
| | **|** | //NumericLiteral//
| | **|** | //Expression// //Expression//
| | **|** | //Expression// ``->`` //Expression//
| | **|** | ``{{`` //Module// ``}}``
| | **|** | ``{`` //Module// ``}``
| | **|** | ``[`` //Module// ``]``
| | **|** | ``(`` //Expression// ``)``

85
src/Language/Lang/ErrM.hs Normal file
View file

@ -0,0 +1,85 @@
{-# LANGUAGE CPP #-}

#if __GLASGOW_HASKELL__ >= 708
---------------------------------------------------------------------------
-- Pattern synonyms exist since ghc 7.8.

-- | BNF Converter: Error Monad.
--
-- Module for backwards compatibility.
--
-- The generated parser now uses @'Either' String@ as error monad.
-- This module defines a type synonym 'Err' and pattern synonyms
-- 'Bad' and 'Ok' for 'Left' and 'Right'.

{-# LANGUAGE PatternSynonyms #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE TypeSynonymInstances #-}

module Language.Lang.ErrM where

import Control.Monad (MonadPlus(..))
import Control.Applicative (Alternative(..))

-- | Error monad with 'String' error messages.
type Err = Either String

-- Old constructor names, mapped onto Either's constructors.
pattern Bad msg = Left msg
pattern Ok a = Right a

#if __GLASGOW_HASKELL__ >= 808
-- From ghc 8.8 'fail' lives in MonadFail, not Monad.
instance MonadFail Err where
  fail = Bad
#endif

-- Left-biased choice: keep the first 'Ok', otherwise try the next.
instance Alternative Err where
  empty = Left "Err.empty"
  (<|>) Left{} = id
  (<|>) x@Right{} = const x

instance MonadPlus Err where
  mzero = empty
  mplus = (<|>)

#else
---------------------------------------------------------------------------
-- ghc 7.6 and before: use old definition as data type.

-- | BNF Converter: Error Monad
-- Copyright (C) 2004 Author: Aarne Ranta
-- This file comes with NO WARRANTY and may be used FOR ANY PURPOSE.

module Language.Lang.ErrM where

-- the Error monad: like Maybe type with error msgs

import Control.Applicative (Applicative(..), Alternative(..))
import Control.Monad (MonadPlus(..), liftM)

-- | 'Ok' carries a result, 'Bad' an error message.
data Err a = Ok a | Bad String
  deriving (Read, Show, Eq, Ord)

instance Monad Err where
  return = Ok
  Ok a >>= f = f a
  Bad s >>= _ = Bad s

instance Applicative Err where
  pure = Ok
  (Bad s) <*> _ = Bad s
  (Ok f) <*> o = liftM f o

instance Functor Err where
  fmap = liftM

instance MonadPlus Err where
  mzero = Bad "Err.mzero"
  mplus (Bad _) y = y
  mplus x _ = x

instance Alternative Err where
  empty = mzero
  (<|>) = mplus
#endif

View file

@ -0,0 +1,275 @@
module Language.Lang.LayoutGrammer where

import Language.Lang.LexGrammer
import Data.Maybe (isNothing, fromJust)

-- Generated by the BNF Converter

-- local parameters

-- | Whether layout also applies at the top level of a file;
-- generated as False for this grammar.
topLayout :: Bool
topLayout = False

-- | Keywords that open an implicit layout block, and keywords that
-- stop one (none here).
-- NOTE(review): grammer.cf currently has its @layout "->"@ pragma
-- commented out, yet "->" is still listed here — this generated
-- module may be out of sync with the grammar; confirm by regenerating.
layoutWords, layoutStopWords :: [String]
layoutWords = ["->"]
layoutStopWords = []

-- layout separators

-- | Token texts inserted by the resolver for block open, block close,
-- and binding separator.
layoutOpen, layoutClose, layoutSep :: String
layoutOpen = "{"
layoutClose = "}"
layoutSep = ";"
-- | Replace layout syntax with explicit layout tokens.
-- The worker 'res' threads the previous token (for newline detection
-- via 'newLine') and a stack of open 'Block's; 'moveAlong' emits
-- processed tokens and recurses — the two are mutually recursive.
resolveLayout :: Bool    -- ^ Whether to use top-level layout.
              -> [Token] -> [Token]
resolveLayout tp = res Nothing [if tl then Implicit 1 else Explicit]
  where
  -- Do top-level layout if the function parameter and the grammar say so.
  tl = tp && topLayout

  res :: Maybe Token -- ^ The previous token, if any.
      -> [Block]     -- ^ A stack of layout blocks.
      -> [Token] -> [Token]

  -- The stack should never be empty.
  res _ [] ts = error $ "Layout error: stack empty. Tokens: " ++ show ts

  res _ st (t0:ts)
    -- We found an open brace in the input,
    -- put an explicit layout block on the stack.
    -- This is done even if there was no layout word,
    -- to keep opening and closing braces.
    | isLayoutOpen t0 = moveAlong (Explicit:st) [t0] ts

  -- We are in an implicit layout block
  res pt st@(Implicit n:ns) (t0:ts)
    -- End of implicit block by a layout stop word
    | isStop t0 =
        -- Exit the current block and all implicit blocks
        -- more indented than the current token
        let (ebs,ns') = span (`moreIndent` column t0) ns
            moreIndent (Implicit x) y = x > y
            moreIndent Explicit _ = False
            -- the number of blocks exited
            b = 1 + length ebs
            bs = replicate b layoutClose
            -- Insert closing braces after the previous token.
            (ts1,ts2) = splitAt (1+b) $ addTokens (afterPrev pt) bs (t0:ts)
        in moveAlong ns' ts1 ts2
    -- End of an implicit layout block
    | newLine pt t0 && column t0 < n =
        -- Insert a closing brace after the previous token.
        let b:t0':ts' = addToken (afterPrev pt) layoutClose (t0:ts)
        -- Repeat, with the current block removed from the stack
        in moveAlong ns [b] (t0':ts')

  res pt st (t0:ts)
    -- Start a new layout block if the first token is a layout word
    | isLayout t0 =
        case ts of
          -- Explicit layout, just move on. The case above
          -- will push an explicit layout block.
          t1:_ | isLayoutOpen t1 -> moveAlong st [t0] ts
          -- The column of the next token determines the starting column
          -- of the implicit layout block.
          -- However, the next block needs to be strictly more indented
          -- than the previous block.
          _ -> let col = max (indentation st + 1) $
                     -- at end of file, the start column doesn't matter
                     if null ts then column t0 else column (head ts)
                   -- insert an open brace after the layout word
                   b:ts' = addToken (nextPos t0) layoutOpen ts
                   -- save the start column
                   st' = Implicit col:st
               in -- Do we have to insert an extra layoutSep?
                  case st of
                    Implicit n:_
                      | newLine pt t0 && column t0 == n
                        && not (isNothing pt ||
                                isTokenIn [layoutSep,layoutOpen] (fromJust pt)) ->
                        let b':t0':b'':ts'' =
                              addToken (afterPrev pt) layoutSep (t0:b:ts')
                        in moveAlong st' [b',t0',b''] ts'
                    _ -> moveAlong st' [t0,b] ts'
    -- If we encounter a closing brace, exit the first explicit layout block.
    | isLayoutClose t0 =
        let st' = drop 1 (dropWhile isImplicit st)
        in if null st'
           then error $ "Layout error: Found " ++ layoutClose ++ " at ("
                      ++ show (line t0) ++ "," ++ show (column t0)
                      ++ ") without an explicit layout block."
           else moveAlong st' [t0] ts

  -- Insert separator if necessary.
  res pt st@(Implicit n:ns) (t0:ts)
    -- Encounted a new line in an implicit layout block.
    | newLine pt t0 && column t0 == n =
        -- Insert a semicolon after the previous token.
        -- unless we are the beginning of the file,
        -- or the previous token is a semicolon or open brace.
        if isNothing pt || isTokenIn [layoutSep,layoutOpen] (fromJust pt)
        then moveAlong st [t0] ts
        else let b:t0':ts' = addToken (afterPrev pt) layoutSep (t0:ts)
             in moveAlong st [b,t0'] ts'

  -- Nothing to see here, move along.
  res _ st (t:ts) = moveAlong st [t] ts

  -- At EOF: skip explicit blocks.
  res (Just t) (Explicit:bs) [] | null bs = []
                                | otherwise = res (Just t) bs []

  -- If we are using top-level layout, insert a semicolon after
  -- the last token, if there isn't one already
  res (Just t) [Implicit _n] []
    | isTokenIn [layoutSep] t = []
    | otherwise = addToken (nextPos t) layoutSep []

  -- At EOF in an implicit, non-top-level block: close the block
  res (Just t) (Implicit _n:bs) [] =
    let c = addToken (nextPos t) layoutClose []
    in moveAlong bs c []

  -- This should only happen if the input is empty.
  res Nothing _st [] = []

  -- | Move on to the next token.
  moveAlong :: [Block] -- ^ The layout stack.
            -> [Token] -- ^ Any tokens just processed.
            -> [Token] -- ^ the rest of the tokens.
            -> [Token]
  moveAlong _ [] _ = error "Layout error: moveAlong got [] as old tokens"
  moveAlong st ot ts = ot ++ res (Just $ last ot) st ts

  -- | True when the current token is the first on its line
  -- (or the very first token of the file).
  newLine :: Maybe Token -> Token -> Bool
  newLine pt t0 = case pt of
    Nothing -> True
    Just t -> line t /= line t0
-- | A layout block on the resolver's stack.
data Block
   = Implicit Int -- ^ An implicit layout block with its start column.
   | Explicit
   deriving Show

-- | Get current indentation. 0 if we are in an explicit block.
indentation :: [Block] -> Int
indentation (Implicit n : _) = n
indentation _ = 0

-- | Check if a block is implicit.
isImplicit :: Block -> Bool
isImplicit (Implicit _) = True
isImplicit _ = False
-- | Alias for the lexer's position type.
type Position = Posn

-- | Insert a number of tokens at the beginning of a list of tokens.
-- NOTE(review): every inserted token is created at the same position
-- @p@; only the *following* tokens are shifted (by 'addToken') —
-- confirm this is the intended behaviour of the generated template.
addTokens :: Position -- ^ Position of the first new token.
          -> [String] -- ^ Token symbols.
          -> [Token]  -- ^ The rest of the tokens. These will have their
                      --   positions updated to make room for the new tokens .
          -> [Token]
addTokens p ss ts = foldr (addToken p) ts ss

-- | Insert a new symbol token at the beginning of a list of tokens.
addToken :: Position -- ^ Position of the new token.
         -> String   -- ^ Symbol in the new token.
         -> [Token]  -- ^ The rest of the tokens. These will have their
                     --   positions updated to make room for the new token.
         -> [Token]
addToken p s ts = sToken p s : map (incrGlobal p (length s)) ts

-- | Get the position immediately to the right of the given token.
-- If no token is given, gets the first position in the file.
afterPrev :: Maybe Token -> Position
afterPrev = maybe (Pn 0 1 1) nextPos

-- | Get the position immediately to the right of the given token.
nextPos :: Token -> Position
nextPos t = Pn (g + s) l (c + s + 1)
  where
    Pn g l c = position t
    s = tokenLength t
-- | Add to the global and column positions of a token.
-- The column position is only changed if the token is on
-- the same line as the given position.
incrGlobal :: Position -- ^ If the token is on the same line
                       --   as this position, update the column position.
           -> Int      -- ^ Number of characters to add to the position.
           -> Token -> Token
incrGlobal (Pn _ l0 _) i (PT (Pn g l c) t) =
  if l /= l0 then PT (Pn (g + i) l c) t
  else PT (Pn (g + i) l (c + i)) t
incrGlobal _ _ p = error $ "cannot add token at " ++ show p

-- | Create a symbol token. The integer ids here must agree with the
-- reserved-word table ('resWords') in the generated lexer module.
sToken :: Position -> String -> Token
sToken p s = PT p (TS s i)
  where
    i = case s of
      "#}" -> 1
      "(" -> 2
      ")" -> 3
      "," -> 4
      "->" -> 5
      ":" -> 6
      "=" -> 7
      "[" -> 8
      "]" -> 9
      "{" -> 10
      "{#" -> 11
      "{{" -> 12
      "}" -> 13
      "}}" -> 14
      _ -> error $ "not a reserved word: " ++ show s
-- | Get the position of a token.
position :: Token -> Position
position t = case t of
  PT p _ -> p
  Err p -> p

-- | Get the line number of a token.
line :: Token -> Int
line t = case position t of Pn _ l _ -> l

-- | Get the column number of a token.
column :: Token -> Int
column t = case position t of Pn _ _ c -> c

-- | Check if a token is one of the given symbols.
isTokenIn :: [String] -> Token -> Bool
isTokenIn ts t = case t of
  PT _ (TS r _) | r `elem` ts -> True
  _ -> False

-- | Check if a word is a layout start token.
isLayout :: Token -> Bool
isLayout = isTokenIn layoutWords

-- | Check if a token is a layout stop token.
isStop :: Token -> Bool
isStop = isTokenIn layoutStopWords

-- | Check if a token is the layout open token.
isLayoutOpen :: Token -> Bool
isLayoutOpen = isTokenIn [layoutOpen]

-- | Check if a token is the layout close token.
isLayoutClose :: Token -> Bool
isLayoutClose = isTokenIn [layoutClose]

-- | Get the number of characters in the token.
tokenLength :: Token -> Int
tokenLength t = length $ prToken t

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,200 @@
-- -*- haskell -*-
-- This Alex file was machine-generated by the BNF converter
{
{-# OPTIONS -fno-warn-incomplete-patterns #-}
{-# OPTIONS_GHC -w #-}
module Language.Lang.LexGrammer where
import qualified Data.Bits
import Data.Word (Word8)
import Data.Char (ord)
}
$c = [A-Z\192-\221] # [\215] -- capital isolatin1 letter (215 = \times) FIXME
$s = [a-z\222-\255] # [\247] -- small isolatin1 letter (247 = \div ) FIXME
$l = [$c $s] -- letter
$d = [0-9] -- digit
$i = [$l $d _ '] -- identifier character
$u = [. \n] -- universal: any character
@rsyms = -- symbols and non-identifier-like reserved words
\{ \# | \# \} | \: | \= | \- \> | \{ \{ | \} \} | \{ | \} | \[ | \] | \( | \) | \,
:-
-- Line comments
"--" [.]* ;
-- Block comments
"{-" [$u # \-]* \- ([$u # [\- \}]] [$u # \-]* \- | \-)* \} ;
$white+ ;
@rsyms
{ tok (\p s -> PT p (eitherResIdent (TV . share) s)) }
([\' \_]| $l)([\' \- \. \_]| ($d | $l)) *
{ tok (\p s -> PT p (eitherResIdent (T_Name . share) s)) }
$d + \. ? $d * | $d * \. ? $d + (e $d +)?
{ tok (\p s -> PT p (eitherResIdent (T_NumericLiteral . share) s)) }
$l $i*
{ tok (\p s -> PT p (eitherResIdent (TV . share) s)) }
{
tok :: (Posn -> String -> Token) -> (Posn -> String -> Token)
tok f p s = f p s
share :: String -> String
share = id
data Tok =
TS !String !Int -- reserved words and symbols
| TL !String -- string literals
| TI !String -- integer literals
| TV !String -- identifiers
| TD !String -- double precision float literals
| TC !String -- character literals
| T_Name !String
| T_NumericLiteral !String
deriving (Eq,Show,Ord)
data Token =
PT Posn Tok
| Err Posn
deriving (Eq,Show,Ord)
printPosn :: Posn -> String
printPosn (Pn _ l c) = "line " ++ show l ++ ", column " ++ show c
tokenPos :: [Token] -> String
tokenPos (t:_) = printPosn (tokenPosn t)
tokenPos [] = "end of file"
tokenPosn :: Token -> Posn
tokenPosn (PT p _) = p
tokenPosn (Err p) = p
tokenLineCol :: Token -> (Int, Int)
tokenLineCol = posLineCol . tokenPosn
posLineCol :: Posn -> (Int, Int)
posLineCol (Pn _ l c) = (l,c)
mkPosToken :: Token -> ((Int, Int), String)
mkPosToken t@(PT p _) = (posLineCol p, prToken t)
prToken :: Token -> String
prToken t = case t of
PT _ (TS s _) -> s
PT _ (TL s) -> show s
PT _ (TI s) -> s
PT _ (TV s) -> s
PT _ (TD s) -> s
PT _ (TC s) -> s
Err _ -> "#error"
PT _ (T_Name s) -> s
PT _ (T_NumericLiteral s) -> s
data BTree = N | B String Tok BTree BTree deriving (Show)
eitherResIdent :: (String -> Tok) -> String -> Tok
eitherResIdent tv s = treeFind resWords
where
treeFind N = tv s
treeFind (B a t left right) | s < a = treeFind left
| s > a = treeFind right
| s == a = t
resWords :: BTree
resWords = b "[" 8 (b "," 4 (b "(" 2 (b "#}" 1 N N) (b ")" 3 N N)) (b ":" 6 (b "->" 5 N N) (b "=" 7 N N))) (b "{{" 12 (b "{" 10 (b "]" 9 N N) (b "{#" 11 N N)) (b "}}" 14 (b "}" 13 N N) N))
where b s n = let bs = s
in B bs (TS bs n)
unescapeInitTail :: String -> String
unescapeInitTail = id . unesc . tail . id
where
unesc s = case s of
'\\':c:cs | elem c ['\"', '\\', '\''] -> c : unesc cs
'\\':'n':cs -> '\n' : unesc cs
'\\':'t':cs -> '\t' : unesc cs
'\\':'r':cs -> '\r' : unesc cs
'\\':'f':cs -> '\f' : unesc cs
'"':[] -> []
c:cs -> c : unesc cs
_ -> []
-------------------------------------------------------------------
-- Alex wrapper code.
-- A modified "posn" wrapper.
-------------------------------------------------------------------
data Posn = Pn !Int !Int !Int
deriving (Eq, Show,Ord)
alexStartPos :: Posn
alexStartPos = Pn 0 1 1
alexMove :: Posn -> Char -> Posn
alexMove (Pn a l c) '\t' = Pn (a+1) l (((c+7) `div` 8)*8+1)
alexMove (Pn a l c) '\n' = Pn (a+1) (l+1) 1
alexMove (Pn a l c) _ = Pn (a+1) l (c+1)
-- | One byte of the UTF-8 encoding fed to the Alex automaton.
type Byte = Word8
-- | Lexer state threaded through the scan loop.
type AlexInput = (Posn,     -- current position,
                  Char,     -- previous char
                  [Byte],   -- pending bytes on the current char
                  String)   -- current input string
-- | Run the generated scanner over the whole input, producing the
-- token list.  A lexical error yields a single 'Err' token carrying
-- its position and ends the scan; skipped input (e.g. whitespace)
-- produces no token.
tokens :: String -> [Token]
tokens str = go (alexStartPos, '\n', [], str)
    where
      go :: AlexInput -> [Token]
      go inp@(pos, _, _, str) =
        case alexScan inp 0 of
          AlexEOF -> []
          AlexError (pos, _, _, _) -> [Err pos]
          AlexSkip inp' len -> go inp'
          AlexToken inp' len act -> act pos (take len str) : (go inp')
-- | Byte-at-a-time input for the Alex DFA: serve any pending UTF-8
-- continuation bytes first; otherwise consume the next Char, advance
-- the position, and queue the rest of that Char's UTF-8 bytes.
alexGetByte :: AlexInput -> Maybe (Byte,AlexInput)
alexGetByte (p, c, (b:bs), s) = Just (b, (p, c, bs, s))
alexGetByte (p, _, [], s) =
  case s of
    [] -> Nothing
    (c:s) ->
             let p' = alexMove p c
                 (b:bs) = utf8Encode c
              in p' `seq` Just (b, (p', c, bs, s))
-- | The character consumed immediately before the current position.
alexInputPrevChar :: AlexInput -> Char
alexInputPrevChar (_, prev, _, _) = prev
-- | Encode one Char as its UTF-8 byte sequence (1-4 bytes).
utf8Encode :: Char -> [Word8]
utf8Encode = map fromIntegral . encode . ord
  where
    -- Continuation byte: 0x80 plus the six payload bits found at @shift@.
    cont n shift = 0x80 + ((n `Data.Bits.shiftR` shift) Data.Bits..&. 0x3f)
    encode n
      | n <= 0x7f   = [n]
      | n <= 0x7ff  = [0xc0 + (n `Data.Bits.shiftR` 6), cont n 0]
      | n <= 0xffff = [0xe0 + (n `Data.Bits.shiftR` 12), cont n 6, cont n 0]
      | otherwise   = [0xf0 + (n `Data.Bits.shiftR` 18), cont n 12, cont n 6, cont n 0]
}

View file

@ -0,0 +1,914 @@
{-# OPTIONS_GHC -w #-}
{-# OPTIONS -XMagicHash -XBangPatterns -XTypeSynonymInstances -XFlexibleInstances -cpp #-}
#if __GLASGOW_HASKELL__ >= 710
{-# OPTIONS_GHC -XPartialTypeSignatures #-}
#endif
{-# OPTIONS_GHC -fno-warn-incomplete-patterns -fno-warn-overlapping-patterns #-}
module Language.Lang.ParGrammer where
import qualified Language.Lang.AbsGrammer
import Language.Lang.LexGrammer
import qualified Data.Array as Happy_Data_Array
import qualified Data.Bits as Bits
import qualified GHC.Exts as Happy_GHC_Exts
import Control.Applicative(Applicative(..))
import Control.Monad (ap)
-- parser produced by Happy Version 1.19.12
-- | Untyped payload cell for the parser's value stack.  Semantic values
-- are coerced in and out via the happyInN/happyOutN wrappers; the
-- newtype around 'HappyAny' erases the type so a single stack can hold
-- values of every nonterminal.
newtype HappyAbsSyn  = HappyAbsSyn HappyAny
#if __GLASGOW_HASKELL__ >= 607
type HappyAny = Happy_GHC_Exts.Any
#else
type HappyAny = forall a . a
#endif
-- | Typed wrapper for nonterminal 4 (Name).  Every nonterminal in this
-- generated parser gets the same trio: a HappyWrapN newtype plus
-- happyInN/happyOutN, which are (unchecked) coercions into and out of
-- the untyped 'HappyAbsSyn' stack cell.
newtype HappyWrap4 = HappyWrap4 (Language.Lang.AbsGrammer.Name)
happyIn4 :: (Language.Lang.AbsGrammer.Name) -> (HappyAbsSyn )
happyIn4 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap4 x)
{-# INLINE happyIn4 #-}
happyOut4 :: (HappyAbsSyn ) -> HappyWrap4
happyOut4 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut4 #-}
newtype HappyWrap5 = HappyWrap5 (Language.Lang.AbsGrammer.NumericLiteral)
happyIn5 :: (Language.Lang.AbsGrammer.NumericLiteral) -> (HappyAbsSyn )
happyIn5 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap5 x)
{-# INLINE happyIn5 #-}
happyOut5 :: (HappyAbsSyn ) -> HappyWrap5
happyOut5 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut5 #-}
newtype HappyWrap6 = HappyWrap6 (Language.Lang.AbsGrammer.Module)
happyIn6 :: (Language.Lang.AbsGrammer.Module) -> (HappyAbsSyn )
happyIn6 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap6 x)
{-# INLINE happyIn6 #-}
happyOut6 :: (HappyAbsSyn ) -> HappyWrap6
happyOut6 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut6 #-}
newtype HappyWrap7 = HappyWrap7 (Language.Lang.AbsGrammer.Binding)
happyIn7 :: (Language.Lang.AbsGrammer.Binding) -> (HappyAbsSyn )
happyIn7 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap7 x)
{-# INLINE happyIn7 #-}
happyOut7 :: (HappyAbsSyn ) -> HappyWrap7
happyOut7 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut7 #-}
newtype HappyWrap8 = HappyWrap8 (Language.Lang.AbsGrammer.Declaration)
happyIn8 :: (Language.Lang.AbsGrammer.Declaration) -> (HappyAbsSyn )
happyIn8 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap8 x)
{-# INLINE happyIn8 #-}
happyOut8 :: (HappyAbsSyn ) -> HappyWrap8
happyOut8 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut8 #-}
newtype HappyWrap9 = HappyWrap9 (Language.Lang.AbsGrammer.Definition)
happyIn9 :: (Language.Lang.AbsGrammer.Definition) -> (HappyAbsSyn )
happyIn9 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap9 x)
{-# INLINE happyIn9 #-}
happyOut9 :: (HappyAbsSyn ) -> HappyWrap9
happyOut9 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut9 #-}
newtype HappyWrap10 = HappyWrap10 (Language.Lang.AbsGrammer.Type)
happyIn10 :: (Language.Lang.AbsGrammer.Type) -> (HappyAbsSyn )
happyIn10 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap10 x)
{-# INLINE happyIn10 #-}
happyOut10 :: (HappyAbsSyn ) -> HappyWrap10
happyOut10 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut10 #-}
newtype HappyWrap11 = HappyWrap11 (Language.Lang.AbsGrammer.Pragma)
happyIn11 :: (Language.Lang.AbsGrammer.Pragma) -> (HappyAbsSyn )
happyIn11 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap11 x)
{-# INLINE happyIn11 #-}
happyOut11 :: (HappyAbsSyn ) -> HappyWrap11
happyOut11 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut11 #-}
newtype HappyWrap12 = HappyWrap12 ([Language.Lang.AbsGrammer.Binding])
happyIn12 :: ([Language.Lang.AbsGrammer.Binding]) -> (HappyAbsSyn )
happyIn12 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap12 x)
{-# INLINE happyIn12 #-}
happyOut12 :: (HappyAbsSyn ) -> HappyWrap12
happyOut12 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut12 #-}
newtype HappyWrap13 = HappyWrap13 ([Language.Lang.AbsGrammer.Declaration])
happyIn13 :: ([Language.Lang.AbsGrammer.Declaration]) -> (HappyAbsSyn )
happyIn13 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap13 x)
{-# INLINE happyIn13 #-}
happyOut13 :: (HappyAbsSyn ) -> HappyWrap13
happyOut13 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut13 #-}
newtype HappyWrap14 = HappyWrap14 ([Language.Lang.AbsGrammer.Definition])
happyIn14 :: ([Language.Lang.AbsGrammer.Definition]) -> (HappyAbsSyn )
happyIn14 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap14 x)
{-# INLINE happyIn14 #-}
happyOut14 :: (HappyAbsSyn ) -> HappyWrap14
happyOut14 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut14 #-}
newtype HappyWrap15 = HappyWrap15 ([Language.Lang.AbsGrammer.Type])
happyIn15 :: ([Language.Lang.AbsGrammer.Type]) -> (HappyAbsSyn )
happyIn15 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap15 x)
{-# INLINE happyIn15 #-}
happyOut15 :: (HappyAbsSyn ) -> HappyWrap15
happyOut15 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut15 #-}
newtype HappyWrap16 = HappyWrap16 ([Language.Lang.AbsGrammer.Name])
happyIn16 :: ([Language.Lang.AbsGrammer.Name]) -> (HappyAbsSyn )
happyIn16 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap16 x)
{-# INLINE happyIn16 #-}
happyOut16 :: (HappyAbsSyn ) -> HappyWrap16
happyOut16 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut16 #-}
newtype HappyWrap17 = HappyWrap17 (Language.Lang.AbsGrammer.Expression)
happyIn17 :: (Language.Lang.AbsGrammer.Expression) -> (HappyAbsSyn )
happyIn17 x = Happy_GHC_Exts.unsafeCoerce# (HappyWrap17 x)
{-# INLINE happyIn17 #-}
happyOut17 :: (HappyAbsSyn ) -> HappyWrap17
happyOut17 x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOut17 #-}
happyInTok :: (Token) -> (HappyAbsSyn )
happyInTok x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyInTok #-}
happyOutTok :: (HappyAbsSyn ) -> (Token)
happyOutTok x = Happy_GHC_Exts.unsafeCoerce# x
{-# INLINE happyOutTok #-}
happyExpList :: HappyAddr
happyExpList = HappyA# "\x00\x00\x04\x9d\x01\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x54\x02\x00\x00\x00\x00\x00\x00\x49\x65\x00\x00\x04\x95\x01\x00\x10\x74\x06\x00\x40\xd0\x19\x00\x00\x00\x20\x00\x00\x04\x9d\x01\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x02\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x80\x00\x00\x00\x20\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\xb0\x54\x02\x00\xc0\x52\x19\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x54\x06\x00\x40\x50\x19\x00\x00\x41\x67\x00\x00\x04\x9d\x01\x00\x10\x74\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x95\x00\x00\x10\x54\x02\x00\x40\x50\x09\x00\x00\x41\x25\x00\x00\x04\x95\x00\x00\x10\x74\x06\x00\x40\x50\x09\x00\x00\x41\x65\x00\x00\x24\x95\x01\x00\x90\x54\x02\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x95\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"#
{-# NOINLINE happyExpListPerState #-}
happyExpListPerState st =
token_strs_expected
where token_strs = ["error","%dummy","%start_pModule","Name","NumericLiteral","Module","Binding","Declaration","Definition","Type","Pragma","ListBinding","ListDeclaration","ListDefinition","ListType","ListName","Expression","'#}'","'('","')'","','","'->'","':'","'='","'['","']'","'{'","'{#'","'{{'","'}'","'}}'","L_Name","L_NumericLiteral","%eof"]
bit_start = st * 34
bit_end = (st + 1) * 34
read_bit = readArrayBit happyExpList
bits = map read_bit [bit_start..bit_end - 1]
bits_indexed = zip bits [0..33]
token_strs_expected = concatMap f bits_indexed
f (False, _) = []
f (True, nr) = [token_strs !! nr]
happyActOffsets :: HappyAddr
happyActOffsets = HappyA# "\x3c\x00\xf3\xff\x00\x00\xff\xff\x00\x00\xf9\xff\x10\x00\x00\x00\x00\x00\xfa\x00\x00\x00\x48\x00\x66\x00\x0d\x00\x18\x00\x1a\x00\x24\x00\x00\x00\x1f\x00\x0a\x01\x23\x00\x2b\x00\x3b\x00\x00\x00\x34\x00\x41\x00\x4c\x00\x4d\x00\xe6\x00\xf1\x00\x33\x00\x00\x00\x48\x00\x6f\x00\x6f\x00\x51\x00\x51\x00\x51\x00\x00\x00\xfa\x00\x03\x01\x03\x01\x03\x01\x03\x01\x03\x01\x51\x00\x03\x01\x6f\x00\x5d\x00\xfa\x00\x00\x00\x4b\x00\x56\x00\xfa\x00\x5d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x01\x00\x00\x00\x00\x00\x00"#
happyGotoOffsets :: HappyAddr
happyGotoOffsets = HappyA# "\xaa\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\x00\x00\x11\x00\x10\x01\x80\x00\x8e\x00\x39\x00\x9c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1b\x00\x11\x00\x00\x00\x11\x00\x2a\x00\x8b\x00\xb8\x00\xc6\x00\xd4\x00\x00\x00\x1b\x00\x49\x00\x5e\x00\x74\x00\x15\x01\x1c\x01\xdf\x00\xf8\x00\x99\x00\xa7\x00\x01\x01\x00\x00\x00\x00\x00\x00\x01\x01\xa7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x23\x01\x00\x00\x00\x00\x00\x00"#
happyAdjustOffset :: Happy_GHC_Exts.Int# -> Happy_GHC_Exts.Int#
happyAdjustOffset off = off
happyDefActions :: HappyAddr
happyDefActions = HappyA# "\xec\xff\x00\x00\xfe\xff\xdf\xff\xde\xff\x00\x00\xeb\xff\xfb\xff\xfa\xff\xf7\xff\xfc\xff\xf5\xff\x00\x00\xe9\xff\xe9\xff\x00\x00\xe9\xff\xfd\xff\x00\x00\xe8\xff\x00\x00\xe1\xff\x00\x00\xed\xff\x00\x00\x00\x00\x00\x00\x00\x00\xdf\xff\x00\x00\x00\x00\xdf\xff\xdd\xff\x00\x00\x00\x00\xec\xff\xec\xff\xec\xff\xf4\xff\xf3\xff\x00\x00\x00\x00\xe9\xff\xe9\xff\xe9\xff\xec\xff\x00\x00\x00\x00\xf6\xff\xf8\xff\xea\xff\xf4\xff\xe8\xff\xf2\xff\xdc\xff\xd8\xff\xee\xff\xef\xff\xd9\xff\xf0\xff\xda\xff\xf9\xff\xe0\xff\xf1\xff\xe9\xff\xdb\xff\xe7\xff"#
happyCheck :: HappyAddr
happyCheck = HappyA# "\xff\xff\x02\x00\x0f\x00\x04\x00\x05\x00\x06\x00\x07\x00\x08\x00\x09\x00\x0a\x00\x11\x00\x0c\x00\x0d\x00\x0e\x00\x0f\x00\x02\x00\x11\x00\x00\x00\x01\x00\x00\x00\x04\x00\x08\x00\x09\x00\x0a\x00\x0b\x00\x0c\x00\x02\x00\x00\x00\x0f\x00\x10\x00\x0d\x00\x0c\x00\x08\x00\x06\x00\x0a\x00\x0b\x00\x0c\x00\x0d\x00\x02\x00\x0f\x00\x10\x00\x0f\x00\x00\x00\x01\x00\x08\x00\x0e\x00\x0a\x00\x0b\x00\x0c\x00\x0e\x00\x0e\x00\x0f\x00\x10\x00\x02\x00\x03\x00\x0d\x00\x05\x00\x00\x00\x0f\x00\x08\x00\x01\x00\x0a\x00\x02\x00\x0c\x00\x07\x00\x0d\x00\x0f\x00\x10\x00\x08\x00\x0c\x00\x0a\x00\x0b\x00\x0c\x00\x00\x00\x02\x00\x0f\x00\x10\x00\x05\x00\x0d\x00\x06\x00\x08\x00\x06\x00\x0a\x00\x02\x00\x0c\x00\x09\x00\x09\x00\x0f\x00\x10\x00\x08\x00\x04\x00\x0a\x00\x0b\x00\x0c\x00\x00\x00\x02\x00\x0f\x00\x10\x00\x05\x00\xff\xff\x06\x00\x08\x00\xff\xff\x0a\x00\x02\x00\x0c\x00\xff\xff\xff\xff\x0f\x00\x10\x00\x08\x00\xff\xff\x0a\x00\x02\x00\x0c\x00\xff\xff\x00\x00\x0f\x00\x10\x00\x08\x00\x04\x00\x0a\x00\x06\x00\x0c\x00\xff\xff\x09\x00\x0f\x00\x10\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\xff\xff\x08\x00\x09\x00\xff\xff\x00\x00\x01\x00\x0d\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\xff\xff\x08\x00\x09\x00\x0d\x00\x00\x00\x01\x00\x0d\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\xff\xff\x08\x00\x09\x00\x0d\x00\x00\x00\x01\x00\x0d\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\xff\xff\x08\x00\xff\xff\x0d\x00\xff\xff\xff\xff\x0d\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\xff\xff\x08\x00\xff\xff\xff\xff\xff\xff\xff\xff\x0d\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\xff\xff\x08\x00\xff\xff\xff\xff\xff\xff\xff\xff\x0d\x00\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\xff\xff\x08\x00\xff\xff\xff\xff\x00\x00\x01\x00\x0d\x00\x03\x00\x04\x00\x05\x00\x06\x00\xff\xff\x08\x00\x02\x00\x03\x00\xff\xff\x05\x00\x0d\x00\xff\xff\x08\x00\xff\xff\x0a\x00\xff\xff\x0c\x00\x02\x00\x03\x00\x0f\x00\x05\x00\x
ff\xff\x00\x00\x08\x00\xff\xff\x0a\x00\x02\x00\x0c\x00\x06\x00\x05\x00\x0f\x00\x00\x00\x08\x00\xff\xff\x0a\x00\x02\x00\x0c\x00\x06\x00\xff\xff\x0f\x00\xff\xff\x08\x00\xff\xff\x0a\x00\x04\x00\x0c\x00\x00\x00\x01\x00\x0f\x00\x09\x00\xff\xff\x00\x00\x06\x00\x0d\x00\x0e\x00\x04\x00\xff\xff\x06\x00\x00\x00\x0d\x00\x09\x00\xff\xff\x04\x00\xff\xff\x06\x00\x00\x00\xff\xff\x09\x00\xff\xff\x04\x00\xff\xff\x06\x00\xff\xff\xff\xff\x09\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"#
happyTable :: HappyAddr
happyTable = HappyA# "\x00\x00\xdf\xff\x03\x00\xdf\xff\xdf\xff\x2f\x00\x30\x00\xdf\xff\xdf\xff\xdf\xff\xff\xff\xdf\xff\xdf\xff\xdf\xff\xdf\xff\x0d\x00\xdf\xff\x1f\x00\x04\x00\x15\x00\x2e\x00\x0e\x00\xe9\xff\x0f\x00\x10\x00\x11\x00\x0d\x00\x26\x00\x03\x00\x12\x00\x20\x00\x3e\x00\x0e\x00\x27\x00\x0f\x00\x10\x00\x11\x00\xe9\xff\x0d\x00\x03\x00\x12\x00\x03\x00\x1f\x00\x04\x00\x0e\x00\x42\x00\x0f\x00\x10\x00\x11\x00\x40\x00\xe9\xff\x03\x00\x12\x00\x22\x00\x38\x00\x1e\x00\x23\x00\x15\x00\x03\x00\x24\x00\x3e\x00\x25\x00\x0d\x00\x26\x00\x16\x00\x3d\x00\x03\x00\x12\x00\x0e\x00\x17\x00\x0f\x00\x10\x00\x11\x00\x26\x00\x22\x00\x03\x00\x12\x00\x23\x00\x3c\x00\x1d\x00\x24\x00\x2f\x00\x25\x00\x0d\x00\x26\x00\x3b\x00\x3a\x00\x03\x00\x12\x00\x0e\x00\x41\x00\x0f\x00\x10\x00\x11\x00\x26\x00\x22\x00\x03\x00\x12\x00\x23\x00\x00\x00\x35\x00\x24\x00\x00\x00\x25\x00\x0d\x00\x26\x00\x00\x00\x00\x00\x03\x00\x12\x00\x0e\x00\x00\x00\x0f\x00\x22\x00\x11\x00\x00\x00\x33\x00\x03\x00\x12\x00\x24\x00\x34\x00\x25\x00\x09\x00\x26\x00\x00\x00\x1b\x00\x03\x00\x12\x00\x03\x00\x04\x00\x1a\x00\x06\x00\x13\x00\x08\x00\x09\x00\x00\x00\x0a\x00\x1b\x00\x00\x00\x1f\x00\x04\x00\x0b\x00\x03\x00\x04\x00\x18\x00\x06\x00\x13\x00\x08\x00\x09\x00\x00\x00\x0a\x00\x19\x00\x36\x00\x1f\x00\x04\x00\x0b\x00\x03\x00\x04\x00\x12\x00\x06\x00\x13\x00\x08\x00\x09\x00\x00\x00\x0a\x00\x14\x00\x30\x00\x1f\x00\x04\x00\x0b\x00\x03\x00\x04\x00\x05\x00\x06\x00\x07\x00\x08\x00\x09\x00\x00\x00\x0a\x00\x00\x00\x20\x00\x00\x00\x00\x00\x0b\x00\x03\x00\x04\x00\x1a\x00\x06\x00\x07\x00\x08\x00\x09\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x03\x00\x04\x00\x18\x00\x06\x00\x07\x00\x08\x00\x09\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x03\x00\x04\x00\x12\x00\x06\x00\x07\x00\x08\x00\x09\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x03\x00\x04\x00\x0b\x00\x06\x00\x07\x00\x08\x00\x09\x00\x00\x00\x32\x00\xdf\xff\xdf\xff\x00\x00\xdf\xff\x0b\x00\x00\x00\xdf\xff\x00\x00\xdf\xff\x00\x00\xdf\xff\x29\x00\x39\x00\xdf\xff\x2a\x00\x
00\x00\x26\x00\x2b\x00\x00\x00\x2c\x00\x29\x00\x2d\x00\x31\x00\x2a\x00\x03\x00\x26\x00\x2b\x00\x00\x00\x2c\x00\x29\x00\x2d\x00\x27\x00\x00\x00\x03\x00\x00\x00\x2b\x00\x00\x00\x2c\x00\x41\x00\x2d\x00\x1c\x00\x04\x00\x03\x00\xe8\xff\x00\x00\x33\x00\x1d\x00\xe8\xff\xe8\xff\x34\x00\x00\x00\x09\x00\x33\x00\x1e\x00\x19\x00\x00\x00\x34\x00\x00\x00\x09\x00\x33\x00\x00\x00\x14\x00\x00\x00\x34\x00\x00\x00\x09\x00\x00\x00\x00\x00\x42\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"#
happyReduceArr = Happy_Data_Array.array (1, 39) [
(1 , happyReduce_1),
(2 , happyReduce_2),
(3 , happyReduce_3),
(4 , happyReduce_4),
(5 , happyReduce_5),
(6 , happyReduce_6),
(7 , happyReduce_7),
(8 , happyReduce_8),
(9 , happyReduce_9),
(10 , happyReduce_10),
(11 , happyReduce_11),
(12 , happyReduce_12),
(13 , happyReduce_13),
(14 , happyReduce_14),
(15 , happyReduce_15),
(16 , happyReduce_16),
(17 , happyReduce_17),
(18 , happyReduce_18),
(19 , happyReduce_19),
(20 , happyReduce_20),
(21 , happyReduce_21),
(22 , happyReduce_22),
(23 , happyReduce_23),
(24 , happyReduce_24),
(25 , happyReduce_25),
(26 , happyReduce_26),
(27 , happyReduce_27),
(28 , happyReduce_28),
(29 , happyReduce_29),
(30 , happyReduce_30),
(31 , happyReduce_31),
(32 , happyReduce_32),
(33 , happyReduce_33),
(34 , happyReduce_34),
(35 , happyReduce_35),
(36 , happyReduce_36),
(37 , happyReduce_37),
(38 , happyReduce_38),
(39 , happyReduce_39)
]
happy_n_terms = 18 :: Int
happy_n_nonterms = 14 :: Int
-- Rule: Name ::= L_Name token.  Unwrap the argument value(s), build the
-- AST node, rewrap for the value stack.  Every generated
-- happyReduce_N/happyReduction_N pair in this file has this shape.
happyReduce_1 = happySpecReduce_1 0# happyReduction_1
happyReduction_1 happy_x_1
        = case happyOutTok happy_x_1 of { (PT _ (T_Name happy_var_1)) ->
        happyIn4
                (Language.Lang.AbsGrammer.Name happy_var_1
        )}
happyReduce_2 = happySpecReduce_1 1# happyReduction_2
happyReduction_2 happy_x_1
= case happyOutTok happy_x_1 of { (PT _ (T_NumericLiteral happy_var_1)) ->
happyIn5
(Language.Lang.AbsGrammer.NumericLiteral happy_var_1
)}
happyReduce_3 = happySpecReduce_1 2# happyReduction_3
happyReduction_3 happy_x_1
= case happyOut12 happy_x_1 of { (HappyWrap12 happy_var_1) ->
happyIn6
(Language.Lang.AbsGrammer.Module happy_var_1
)}
happyReduce_4 = happySpecReduce_1 3# happyReduction_4
happyReduction_4 happy_x_1
= case happyOut8 happy_x_1 of { (HappyWrap8 happy_var_1) ->
happyIn7
(Language.Lang.AbsGrammer.BindingDeclaration happy_var_1
)}
happyReduce_5 = happySpecReduce_1 3# happyReduction_5
happyReduction_5 happy_x_1
= case happyOut9 happy_x_1 of { (HappyWrap9 happy_var_1) ->
happyIn7
(Language.Lang.AbsGrammer.BindingDefinition happy_var_1
)}
happyReduce_6 = happySpecReduce_3 3# happyReduction_6
happyReduction_6 happy_x_3
happy_x_2
happy_x_1
= case happyOut11 happy_x_2 of { (HappyWrap11 happy_var_2) ->
happyIn7
(Language.Lang.AbsGrammer.BindingPragma happy_var_2
)}
happyReduce_7 = happySpecReduce_3 4# happyReduction_7
happyReduction_7 happy_x_3
happy_x_2
happy_x_1
= case happyOut4 happy_x_1 of { (HappyWrap4 happy_var_1) ->
case happyOut10 happy_x_3 of { (HappyWrap10 happy_var_3) ->
happyIn8
(Language.Lang.AbsGrammer.DeclarationNamed happy_var_1 happy_var_3
)}}
happyReduce_8 = happySpecReduce_1 4# happyReduction_8
happyReduction_8 happy_x_1
= case happyOut10 happy_x_1 of { (HappyWrap10 happy_var_1) ->
happyIn8
(Language.Lang.AbsGrammer.DeclarationAnonymous happy_var_1
)}
happyReduce_9 = happySpecReduce_3 5# happyReduction_9
happyReduction_9 happy_x_3
happy_x_2
happy_x_1
= case happyOut4 happy_x_1 of { (HappyWrap4 happy_var_1) ->
case happyOut17 happy_x_3 of { (HappyWrap17 happy_var_3) ->
happyIn9
(Language.Lang.AbsGrammer.DefinitionNamed happy_var_1 happy_var_3
)}}
happyReduce_10 = happySpecReduce_1 5# happyReduction_10
happyReduction_10 happy_x_1
= case happyOut17 happy_x_1 of { (HappyWrap17 happy_var_1) ->
happyIn9
(Language.Lang.AbsGrammer.DefinitionAnonymous happy_var_1
)}
happyReduce_11 = happySpecReduce_1 6# happyReduction_11
happyReduction_11 happy_x_1
= case happyOut4 happy_x_1 of { (HappyWrap4 happy_var_1) ->
happyIn10
(Language.Lang.AbsGrammer.TypeName happy_var_1
)}
happyReduce_12 = happySpecReduce_2 6# happyReduction_12
happyReduction_12 happy_x_2
happy_x_1
= case happyOut10 happy_x_1 of { (HappyWrap10 happy_var_1) ->
case happyOut10 happy_x_2 of { (HappyWrap10 happy_var_2) ->
happyIn10
(Language.Lang.AbsGrammer.TypeApplication happy_var_1 happy_var_2
)}}
happyReduce_13 = happySpecReduce_3 6# happyReduction_13
happyReduction_13 happy_x_3
happy_x_2
happy_x_1
= case happyOut10 happy_x_1 of { (HappyWrap10 happy_var_1) ->
case happyOut10 happy_x_3 of { (HappyWrap10 happy_var_3) ->
happyIn10
(Language.Lang.AbsGrammer.TypeAbstraction happy_var_1 happy_var_3
)}}
happyReduce_14 = happySpecReduce_3 6# happyReduction_14
happyReduction_14 happy_x_3
happy_x_2
happy_x_1
= case happyOut13 happy_x_2 of { (HappyWrap13 happy_var_2) ->
happyIn10
(Language.Lang.AbsGrammer.TypeImplicit happy_var_2
)}
happyReduce_15 = happySpecReduce_3 6# happyReduction_15
happyReduction_15 happy_x_3
happy_x_2
happy_x_1
= case happyOut13 happy_x_2 of { (HappyWrap13 happy_var_2) ->
happyIn10
(Language.Lang.AbsGrammer.TypeRecord happy_var_2
)}
happyReduce_16 = happySpecReduce_3 6# happyReduction_16
happyReduction_16 happy_x_3
happy_x_2
happy_x_1
= case happyOut13 happy_x_2 of { (HappyWrap13 happy_var_2) ->
happyIn10
(Language.Lang.AbsGrammer.TypeAlternative happy_var_2
)}
happyReduce_17 = happySpecReduce_3 6# happyReduction_17
happyReduction_17 happy_x_3
happy_x_2
happy_x_1
= case happyOut10 happy_x_2 of { (HappyWrap10 happy_var_2) ->
happyIn10
(Language.Lang.AbsGrammer.TypeParens happy_var_2
)}
happyReduce_18 = happySpecReduce_1 7# happyReduction_18
happyReduction_18 happy_x_1
= case happyOut16 happy_x_1 of { (HappyWrap16 happy_var_1) ->
happyIn11
(Language.Lang.AbsGrammer.Pragma happy_var_1
)}
happyReduce_19 = happySpecReduce_0 8# happyReduction_19
happyReduction_19 = happyIn12
([]
)
happyReduce_20 = happySpecReduce_1 8# happyReduction_20
happyReduction_20 happy_x_1
= case happyOut7 happy_x_1 of { (HappyWrap7 happy_var_1) ->
happyIn12
((:[]) happy_var_1
)}
happyReduce_21 = happySpecReduce_3 8# happyReduction_21
happyReduction_21 happy_x_3
happy_x_2
happy_x_1
= case happyOut7 happy_x_1 of { (HappyWrap7 happy_var_1) ->
case happyOut12 happy_x_3 of { (HappyWrap12 happy_var_3) ->
happyIn12
((:) happy_var_1 happy_var_3
)}}
happyReduce_22 = happySpecReduce_0 9# happyReduction_22
happyReduction_22 = happyIn13
([]
)
happyReduce_23 = happySpecReduce_1 9# happyReduction_23
happyReduction_23 happy_x_1
= case happyOut8 happy_x_1 of { (HappyWrap8 happy_var_1) ->
happyIn13
((:[]) happy_var_1
)}
happyReduce_24 = happySpecReduce_3 9# happyReduction_24
happyReduction_24 happy_x_3
happy_x_2
happy_x_1
= case happyOut8 happy_x_1 of { (HappyWrap8 happy_var_1) ->
case happyOut13 happy_x_3 of { (HappyWrap13 happy_var_3) ->
happyIn13
((:) happy_var_1 happy_var_3
)}}
happyReduce_25 = happySpecReduce_0 10# happyReduction_25
happyReduction_25 = happyIn14
([]
)
happyReduce_26 = happySpecReduce_1 10# happyReduction_26
happyReduction_26 happy_x_1
= case happyOut9 happy_x_1 of { (HappyWrap9 happy_var_1) ->
happyIn14
((:[]) happy_var_1
)}
happyReduce_27 = happySpecReduce_3 10# happyReduction_27
happyReduction_27 happy_x_3
happy_x_2
happy_x_1
= case happyOut9 happy_x_1 of { (HappyWrap9 happy_var_1) ->
case happyOut14 happy_x_3 of { (HappyWrap14 happy_var_3) ->
happyIn14
((:) happy_var_1 happy_var_3
)}}
happyReduce_28 = happySpecReduce_1 11# happyReduction_28
happyReduction_28 happy_x_1
= case happyOut10 happy_x_1 of { (HappyWrap10 happy_var_1) ->
happyIn15
((:[]) happy_var_1
)}
happyReduce_29 = happySpecReduce_3 11# happyReduction_29
happyReduction_29 happy_x_3
happy_x_2
happy_x_1
= case happyOut10 happy_x_1 of { (HappyWrap10 happy_var_1) ->
case happyOut15 happy_x_3 of { (HappyWrap15 happy_var_3) ->
happyIn15
((:) happy_var_1 happy_var_3
)}}
happyReduce_30 = happySpecReduce_1 12# happyReduction_30
happyReduction_30 happy_x_1
= case happyOut4 happy_x_1 of { (HappyWrap4 happy_var_1) ->
happyIn16
((:[]) happy_var_1
)}
happyReduce_31 = happySpecReduce_2 12# happyReduction_31
happyReduction_31 happy_x_2
happy_x_1
= case happyOut4 happy_x_1 of { (HappyWrap4 happy_var_1) ->
case happyOut16 happy_x_2 of { (HappyWrap16 happy_var_2) ->
happyIn16
((:) happy_var_1 happy_var_2
)}}
happyReduce_32 = happySpecReduce_1 13# happyReduction_32
happyReduction_32 happy_x_1
= case happyOut4 happy_x_1 of { (HappyWrap4 happy_var_1) ->
happyIn17
(Language.Lang.AbsGrammer.ExpressionName happy_var_1
)}
happyReduce_33 = happySpecReduce_1 13# happyReduction_33
happyReduction_33 happy_x_1
= case happyOut5 happy_x_1 of { (HappyWrap5 happy_var_1) ->
happyIn17
(Language.Lang.AbsGrammer.ExpressionLiteral happy_var_1
)}
happyReduce_34 = happySpecReduce_2 13# happyReduction_34
happyReduction_34 happy_x_2
happy_x_1
= case happyOut17 happy_x_1 of { (HappyWrap17 happy_var_1) ->
case happyOut17 happy_x_2 of { (HappyWrap17 happy_var_2) ->
happyIn17
(Language.Lang.AbsGrammer.ExpressionApplication happy_var_1 happy_var_2
)}}
happyReduce_35 = happySpecReduce_3 13# happyReduction_35
happyReduction_35 happy_x_3
happy_x_2
happy_x_1
= case happyOut17 happy_x_1 of { (HappyWrap17 happy_var_1) ->
case happyOut17 happy_x_3 of { (HappyWrap17 happy_var_3) ->
happyIn17
(Language.Lang.AbsGrammer.ExpressionAbstraction happy_var_1 happy_var_3
)}}
happyReduce_36 = happySpecReduce_3 13# happyReduction_36
happyReduction_36 happy_x_3
happy_x_2
happy_x_1
= case happyOut6 happy_x_2 of { (HappyWrap6 happy_var_2) ->
happyIn17
(Language.Lang.AbsGrammer.ExpressionImplicit happy_var_2
)}
happyReduce_37 = happySpecReduce_3 13# happyReduction_37
happyReduction_37 happy_x_3
happy_x_2
happy_x_1
= case happyOut6 happy_x_2 of { (HappyWrap6 happy_var_2) ->
happyIn17
(Language.Lang.AbsGrammer.ExpressionRecord happy_var_2
)}
happyReduce_38 = happySpecReduce_3 13# happyReduction_38
happyReduction_38 happy_x_3
happy_x_2
happy_x_1
= case happyOut6 happy_x_2 of { (HappyWrap6 happy_var_2) ->
happyIn17
(Language.Lang.AbsGrammer.ExpressionAlternative happy_var_2
)}
happyReduce_39 = happySpecReduce_3 13# happyReduction_39
happyReduction_39 happy_x_3
happy_x_2
happy_x_1
= case happyOut17 happy_x_2 of { (HappyWrap17 happy_var_2) ->
happyIn17
(Language.Lang.AbsGrammer.ExpressionParens happy_var_2
)}
-- | Fetch the next token and re-enter the parse loop: at end of input
-- feed the %eof terminal (17#); otherwise map the token to its terminal
-- number (1..16, matching the codes in the lexer's resWords table) and
-- continue.  Anything unrecognised is reported as a syntax error.
happyNewToken action sts stk [] =
        happyDoAction 17# notHappyAtAll action sts stk []
happyNewToken action sts stk (tk:tks) =
        let cont i = happyDoAction i tk action sts stk tks in
        case tk of {
        PT _ (TS _ 1) -> cont 1#;
        PT _ (TS _ 2) -> cont 2#;
        PT _ (TS _ 3) -> cont 3#;
        PT _ (TS _ 4) -> cont 4#;
        PT _ (TS _ 5) -> cont 5#;
        PT _ (TS _ 6) -> cont 6#;
        PT _ (TS _ 7) -> cont 7#;
        PT _ (TS _ 8) -> cont 8#;
        PT _ (TS _ 9) -> cont 9#;
        PT _ (TS _ 10) -> cont 10#;
        PT _ (TS _ 11) -> cont 11#;
        PT _ (TS _ 12) -> cont 12#;
        PT _ (TS _ 13) -> cont 13#;
        PT _ (TS _ 14) -> cont 14#;
        PT _ (T_Name happy_dollar_dollar) -> cont 15#;
        PT _ (T_NumericLiteral happy_dollar_dollar) -> cont 16#;
        _ -> happyError' ((tk:tks), [])
        }
-- | Error hook used by the tables: 17# is the %eof terminal, in which
-- case the failing token is not prepended to the remaining stream.
happyError_ explist 17# tk tks = happyError' (tks, explist)
happyError_ explist _ tk tks = happyError' ((tk:tks), explist)
-- Monad glue: this parser runs in Either String, so "then" is (>>=)
-- and "return" is plain success.
happyThen :: () => Either String a -> (a -> Either String b) -> Either String b
happyThen = ((>>=))
happyReturn :: () => a -> Either String a
happyReturn = (return)
-- Variants that also thread the remaining token list.
happyThen1 m k tks = ((>>=)) m (\a -> k a tks)
happyReturn1 :: () => a -> b -> Either String a
happyReturn1 = \a tks -> (return) a
-- Report a syntax error; the expected-token list is dropped here.
happyError' :: () => ([(Token)], [String]) -> Either String a
happyError' = (\(tokens, _) -> happyError tokens)
-- | Top-level entry point: parse a token stream as a 'Module'.
pModule tks = happySomeParser where
 happySomeParser = happyThen (happyParse 0# tks) (\x -> happyReturn (let {(HappyWrap6 x') = happyOut6 x} in x'))
-- Reduction results are not forced eagerly.
happySeq = happyDontSeq
-- | Build the human-readable syntax-error message, pointing at the
-- offending token, a lexer error, or end of file.
happyError :: [Token] -> Either String a
happyError ts = Left $
  "syntax error at " ++ tokenPos ts ++
  case ts of
    [] -> []
    [Err _] -> " due to lexer error"
    t:_ -> " before `" ++ (prToken t) ++ "'"
myLexer = tokens
{-# LINE 1 "templates/GenericTemplate.hs" #-}
-- $Id: GenericTemplate.hs,v 1.26 2005/01/14 14:47:22 simonmar Exp $
-- Do not remove this comment. Required to fix CPP parsing when using GCC and a clang-compiled alex.
#if __GLASGOW_HASKELL__ > 706
#define LT(n,m) ((Happy_GHC_Exts.tagToEnum# (n Happy_GHC_Exts.<# m)) :: Bool)
#define GTE(n,m) ((Happy_GHC_Exts.tagToEnum# (n Happy_GHC_Exts.>=# m)) :: Bool)
#define EQ(n,m) ((Happy_GHC_Exts.tagToEnum# (n Happy_GHC_Exts.==# m)) :: Bool)
#else
#define LT(n,m) (n Happy_GHC_Exts.<# m)
#define GTE(n,m) (n Happy_GHC_Exts.>=# m)
#define EQ(n,m) (n Happy_GHC_Exts.==# m)
#endif
-- | Unboxed list of parser states (the state stack).
data Happy_IntList = HappyCons Happy_GHC_Exts.Int# Happy_IntList
infixr 9 `HappyStk`
-- | The semantic-value stack.
data HappyStk a = HappyStk a (HappyStk a)
-----------------------------------------------------------------------------
-- starting the parse
happyParse start_state = happyNewToken start_state notHappyAtAll notHappyAtAll
-----------------------------------------------------------------------------
-- Accepting the parse
-- If the current token is ERROR_TOK, it means we've just accepted a partial
-- parse (a %partial parser). We must ignore the saved token on the top of
-- the stack in this case.
-- | Accept: return the finished value from the stack.  With ERROR_TOK
-- (0#) a saved token sits on top of the stack and must be skipped.
happyAccept 0# tk st sts (_ `HappyStk` ans `HappyStk` _) =
        happyReturn1 ans
happyAccept j tk st sts (HappyStk ans _) =
        (happyTcHack j (happyTcHack st)) (happyReturn1 ans)
-----------------------------------------------------------------------------
-- Arrays only: do the next action
-- | Table-driven LR step: look up the action for (state, terminal).
--   0#  -> fail, reporting the expected-token set for this state
--   -1# -> accept
--   <0  -> reduce by rule number (-(n+1))
--   >0  -> shift to state (n-1)
-- The happyCheck table guards the compressed action table against
-- offset collisions; on mismatch the state's default action is used.
happyDoAction i tk st
  = {- nothing -}
    case action of
      0# -> {- nothing -}
            happyFail (happyExpListPerState ((Happy_GHC_Exts.I# (st)) :: Int)) i tk st
      -1# -> {- nothing -}
            happyAccept i tk st
      n | LT(n,(0# :: Happy_GHC_Exts.Int#)) -> {- nothing -}
            (happyReduceArr Happy_Data_Array.! rule) i tk st
            where rule = (Happy_GHC_Exts.I# ((Happy_GHC_Exts.negateInt# ((n Happy_GHC_Exts.+# (1# :: Happy_GHC_Exts.Int#))))))
      n -> {- nothing -}
           happyShift new_state i tk st
           where new_state = (n Happy_GHC_Exts.-# (1# :: Happy_GHC_Exts.Int#))
  where off = happyAdjustOffset (indexShortOffAddr happyActOffsets st)
        off_i = (off Happy_GHC_Exts.+# i)
        check = if GTE(off_i,(0# :: Happy_GHC_Exts.Int#))
                then EQ(indexShortOffAddr happyCheck off_i, i)
                else False
        action
          | check = indexShortOffAddr happyTable off_i
          | otherwise = indexShortOffAddr happyDefActions st
-- | Read entry @off@ of a parse table stored as raw static bytes:
-- two bytes per entry, little-endian, narrowed to a signed 16-bit Int#.
indexShortOffAddr (HappyA# arr) off =
        Happy_GHC_Exts.narrow16Int# i
  where
        i = Happy_GHC_Exts.word2Int# (Happy_GHC_Exts.or# (Happy_GHC_Exts.uncheckedShiftL# high 8#) low)
        high = Happy_GHC_Exts.int2Word# (Happy_GHC_Exts.ord# (Happy_GHC_Exts.indexCharOffAddr# arr (off' Happy_GHC_Exts.+# 1#)))
        low = Happy_GHC_Exts.int2Word# (Happy_GHC_Exts.ord# (Happy_GHC_Exts.indexCharOffAddr# arr off'))
        off' = off Happy_GHC_Exts.*# 2#
{-# INLINE happyLt #-}
-- Unboxed less-than via the CPP LT macro.
happyLt x y = LT(x,y)
-- | Test bit @bit@ of a bitmap packed 16 bits per table entry
-- (used for the expected-token sets).
readArrayBit arr bit =
    Bits.testBit (Happy_GHC_Exts.I# (indexShortOffAddr arr ((unbox_int bit) `Happy_GHC_Exts.iShiftRA#` 4#))) (bit `mod` 16)
  where unbox_int (Happy_GHC_Exts.I# x) = x
-- | A parse table: a pointer to raw static bytes.
data HappyAddr = HappyA# Happy_GHC_Exts.Addr#
-----------------------------------------------------------------------------
-- HappyState data type (not arrays)
-----------------------------------------------------------------------------
-- Shifting a token
-- | Shift: with ERROR_TOK (0#) re-dispatch the token saved on the value
-- stack in the new state; otherwise push state and token and fetch the
-- next token.
happyShift new_state 0# tk st sts stk@(x `HappyStk` _) =
     let i = (case Happy_GHC_Exts.unsafeCoerce# x of { (Happy_GHC_Exts.I# (i)) -> i }) in
--     trace "shifting the error token" $
     happyDoAction i tk new_state (HappyCons (st) (sts)) (stk)
happyShift new_state i tk st sts stk =
     happyNewToken new_state (HappyCons (st) (sts)) ((happyInTok (tk))`HappyStk`stk)
-- happyReduce is specialised for the common cases.
happySpecReduce_0 i fn 0# tk st sts stk
= happyFail [] 0# tk st sts stk
happySpecReduce_0 nt fn j tk st@((action)) sts stk
= happyGoto nt j tk st (HappyCons (st) (sts)) (fn `HappyStk` stk)
happySpecReduce_1 i fn 0# tk st sts stk
= happyFail [] 0# tk st sts stk
happySpecReduce_1 nt fn j tk _ sts@((HappyCons (st@(action)) (_))) (v1`HappyStk`stk')
= let r = fn v1 in
happySeq r (happyGoto nt j tk st sts (r `HappyStk` stk'))
happySpecReduce_2 i fn 0# tk st sts stk
= happyFail [] 0# tk st sts stk
happySpecReduce_2 nt fn j tk _ (HappyCons (_) (sts@((HappyCons (st@(action)) (_))))) (v1`HappyStk`v2`HappyStk`stk')
= let r = fn v1 v2 in
happySeq r (happyGoto nt j tk st sts (r `HappyStk` stk'))
happySpecReduce_3 i fn 0# tk st sts stk
= happyFail [] 0# tk st sts stk
happySpecReduce_3 nt fn j tk _ (HappyCons (_) ((HappyCons (_) (sts@((HappyCons (st@(action)) (_))))))) (v1`HappyStk`v2`HappyStk`v3`HappyStk`stk')
= let r = fn v1 v2 v3 in
happySeq r (happyGoto nt j tk st sts (r `HappyStk` stk'))
-- General reduction for rules with k right-hand-side symbols: drop
-- k-1 states from the state stack, apply fn to the whole value stack,
-- and go to the state reached from the uncovered state st1.
happyReduce k i fn 0# tk st sts stk
= happyFail [] 0# tk st sts stk
happyReduce k nt fn j tk st sts stk
= case happyDrop (k Happy_GHC_Exts.-# (1# :: Happy_GHC_Exts.Int#)) sts of
sts1@((HappyCons (st1@(action)) (_))) ->
let r = fn stk in -- it doesn't hurt to always seq here...
happyDoSeq r (happyGoto nt j tk st1 sts1 r)
-- Monadic reduction: fn runs in the parse monad (bound via happyThen1),
-- so rule actions can fail or inspect the lookahead token tk.
happyMonadReduce k nt fn 0# tk st sts stk
= happyFail [] 0# tk st sts stk
happyMonadReduce k nt fn j tk st sts stk =
case happyDrop k (HappyCons (st) (sts)) of
sts1@((HappyCons (st1@(action)) (_))) ->
let drop_stk = happyDropStk k stk in
happyThen1 (fn stk tk) (\r -> happyGoto nt j tk st1 sts1 (r `HappyStk` drop_stk))
-- Variant used by %monad rules that re-read a token afterwards: the
-- goto-table lookup is inlined here and control returns through
-- happyNewToken rather than happyGoto.
happyMonad2Reduce k nt fn 0# tk st sts stk
= happyFail [] 0# tk st sts stk
happyMonad2Reduce k nt fn j tk st sts stk =
case happyDrop k (HappyCons (st) (sts)) of
sts1@((HappyCons (st1@(action)) (_))) ->
let drop_stk = happyDropStk k stk
off = happyAdjustOffset (indexShortOffAddr happyGotoOffsets st1)
off_i = (off Happy_GHC_Exts.+# nt)
new_state = indexShortOffAddr happyTable off_i
in
happyThen1 (fn stk tk) (\r -> happyNewToken new_state sts1 (r `HappyStk` drop_stk))
-- Drop n entries from the state stack (unboxed-Int# counted recursion).
happyDrop 0# l = l
happyDrop n (HappyCons (_) (t)) = happyDrop (n Happy_GHC_Exts.-# (1# :: Happy_GHC_Exts.Int#)) t
-- Drop n entries from the value stack.
happyDropStk 0# l = l
happyDropStk n (x `HappyStk` xs) = happyDropStk (n Happy_GHC_Exts.-# (1#::Happy_GHC_Exts.Int#)) xs
-----------------------------------------------------------------------------
-- Moving to a new state after a reduction
-- After a reduction to nonterminal nt, look up the successor state in
-- the goto table (offset table indexed by the uncovered state st) and
-- re-enter the action loop with the current token.
happyGoto nt j tk st =
{- nothing -}
happyDoAction j tk new_state
where off = happyAdjustOffset (indexShortOffAddr happyGotoOffsets st)
off_i = (off Happy_GHC_Exts.+# nt)
new_state = indexShortOffAddr happyTable off_i
-----------------------------------------------------------------------------
-- Error recovery (ERROR_TOK is the error token)
-- parse error if we are in recovery and we fail again
-- First clause: we are already recovering (current token id is 0#); the
-- original token id sits on top of the value stack, so unbox it and
-- report a definitive parse error via happyError_.
happyFail explist 0# tk old_st _ stk@(x `HappyStk` _) =
let i = (case Happy_GHC_Exts.unsafeCoerce# x of { (Happy_GHC_Exts.I# (i)) -> i }) in
-- trace "failing" $
happyError_ explist i tk
{- We don't need state discarding for our restricted implementation of
"error". In fact, it can cause some bogus parses, so I've disabled it
for now --SDM
-- discard a state
happyFail ERROR_TOK tk old_st CONS(HAPPYSTATE(action),sts)
(saved_tok `HappyStk` _ `HappyStk` stk) =
-- trace ("discarding state, depth " ++ show (length stk)) $
DO_ACTION(action,ERROR_TOK,tk,sts,(saved_tok`HappyStk`stk))
-}
-- Enter error recovery: generate an error token,
-- save the old token and carry on.
-- The saved id i is what the first clause above recovers if the error
-- token cannot be shifted either.
happyFail explist i tk (action) sts stk =
-- trace "entering error recovery" $
happyDoAction 0# tk action sts ((Happy_GHC_Exts.unsafeCoerce# (Happy_GHC_Exts.I# (i))) `HappyStk` stk)
-- Internal happy errors:
-- Placeholder for stack slots that should never be demanded; forcing it
-- indicates a bug in the generated tables, not in user input.
notHappyAtAll :: a
notHappyAtAll = error "Internal Happy error\n"
-----------------------------------------------------------------------------
-- Hack to get the typechecker to accept our action functions
-- Consumes an Int# argument without using it, giving action functions a
-- uniform type; INLINE'd so it costs nothing at runtime.
happyTcHack :: Happy_GHC_Exts.Int# -> a -> a
happyTcHack x y = y
{-# INLINE happyTcHack #-}
-----------------------------------------------------------------------------
-- Seq-ing. If the --strict flag is given, then Happy emits
-- happySeq = happyDoSeq
-- otherwise it emits
-- happySeq = happyDontSeq
-- Strict and lazy combinators for forcing (or not) each reduction's
-- semantic value; which one happySeq aliases is decided at generation
-- time, so both definitions must be kept exactly as-is.
happyDoSeq, happyDontSeq :: a -> b -> b
happyDoSeq a b = a `seq` b
happyDontSeq a b = b
-----------------------------------------------------------------------------
-- Don't inline any functions from the template. GHC has a nasty habit
-- of deciding to inline happyGoto everywhere, which increases the size of
-- the generated parser quite a bit.
{-# NOINLINE happyDoAction #-}
{-# NOINLINE happyTable #-}
{-# NOINLINE happyCheck #-}
{-# NOINLINE happyActOffsets #-}
{-# NOINLINE happyGotoOffsets #-}
{-# NOINLINE happyDefActions #-}
{-# NOINLINE happyShift #-}
{-# NOINLINE happySpecReduce_0 #-}
{-# NOINLINE happySpecReduce_1 #-}
{-# NOINLINE happySpecReduce_2 #-}
{-# NOINLINE happySpecReduce_3 #-}
{-# NOINLINE happyReduce #-}
{-# NOINLINE happyMonadReduce #-}
{-# NOINLINE happyGoto #-}
{-# NOINLINE happyFail #-}
-- end of Happy Template.

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,109 @@
-- This Happy file was machine-generated by the BNF converter
{
{-# OPTIONS_GHC -fno-warn-incomplete-patterns -fno-warn-overlapping-patterns #-}
module Language.Lang.ParGrammer where
import qualified Language.Lang.AbsGrammer
import Language.Lang.LexGrammer
}
-- Entry point: pModule parses a complete Module, in the Either String
-- monad (Left carries the syntax-error message from happyError below).
%name pModule Module
-- no lexer declaration
%monad { Either String } { (>>=) } { return }
%tokentype {Token}
-- Terminals: TS _ n are the grammar's literal tokens (numbered by
-- BNFC); T_Name / T_NumericLiteral carry their matched text via $$.
%token
'#}' { PT _ (TS _ 1) }
'(' { PT _ (TS _ 2) }
')' { PT _ (TS _ 3) }
',' { PT _ (TS _ 4) }
'->' { PT _ (TS _ 5) }
':' { PT _ (TS _ 6) }
'=' { PT _ (TS _ 7) }
'[' { PT _ (TS _ 8) }
']' { PT _ (TS _ 9) }
'{' { PT _ (TS _ 10) }
'{#' { PT _ (TS _ 11) }
'{{' { PT _ (TS _ 12) }
'}' { PT _ (TS _ 13) }
'}}' { PT _ (TS _ 14) }
L_Name { PT _ (T_Name $$) }
L_NumericLiteral { PT _ (T_NumericLiteral $$) }
%%
-- Wrap raw token text in the corresponding AbsGrammer newtypes.
Name :: { Language.Lang.AbsGrammer.Name}
Name : L_Name { Language.Lang.AbsGrammer.Name $1 }
NumericLiteral :: { Language.Lang.AbsGrammer.NumericLiteral}
NumericLiteral : L_NumericLiteral { Language.Lang.AbsGrammer.NumericLiteral $1 }
Module :: { Language.Lang.AbsGrammer.Module }
Module : ListBinding { Language.Lang.AbsGrammer.Module $1 }
Binding :: { Language.Lang.AbsGrammer.Binding }
Binding : Declaration { Language.Lang.AbsGrammer.BindingDeclaration $1 }
| Definition { Language.Lang.AbsGrammer.BindingDefinition $1 }
| '{#' Pragma '#}' { Language.Lang.AbsGrammer.BindingPragma $2 }
Declaration :: { Language.Lang.AbsGrammer.Declaration }
Declaration : Name ':' Type { Language.Lang.AbsGrammer.DeclarationNamed $1 $3 }
| Type { Language.Lang.AbsGrammer.DeclarationAnonymous $1 }
Definition :: { Language.Lang.AbsGrammer.Definition }
Definition : Name '=' Expression { Language.Lang.AbsGrammer.DefinitionNamed $1 $3 }
| Expression { Language.Lang.AbsGrammer.DefinitionAnonymous $1 }
-- NOTE(review): TypeApplication / TypeAbstraction (and the analogous
-- Expression rules) carry no precedence or associativity annotations,
-- so this grammar likely has shift/reduce conflicts — confirm against
-- the happy --info output.
Type :: { Language.Lang.AbsGrammer.Type }
Type : Name { Language.Lang.AbsGrammer.TypeName $1 }
| Type Type { Language.Lang.AbsGrammer.TypeApplication $1 $2 }
| Type '->' Type { Language.Lang.AbsGrammer.TypeAbstraction $1 $3 }
| '{{' ListDeclaration '}}' { Language.Lang.AbsGrammer.TypeImplicit $2 }
| '{' ListDeclaration '}' { Language.Lang.AbsGrammer.TypeRecord $2 }
| '[' ListDeclaration ']' { Language.Lang.AbsGrammer.TypeAlternative $2 }
| '(' Type ')' { Language.Lang.AbsGrammer.TypeParens $2 }
Pragma :: { Language.Lang.AbsGrammer.Pragma }
Pragma : ListName { Language.Lang.AbsGrammer.Pragma $1 }
-- Comma-separated list nonterminals (possibly empty), built right to
-- left with (:).
ListBinding :: { [Language.Lang.AbsGrammer.Binding] }
ListBinding : {- empty -} { [] }
| Binding { (:[]) $1 }
| Binding ',' ListBinding { (:) $1 $3 }
ListDeclaration :: { [Language.Lang.AbsGrammer.Declaration] }
ListDeclaration : {- empty -} { [] }
| Declaration { (:[]) $1 }
| Declaration ',' ListDeclaration { (:) $1 $3 }
ListDefinition :: { [Language.Lang.AbsGrammer.Definition] }
ListDefinition : {- empty -} { [] }
| Definition { (:[]) $1 }
| Definition ',' ListDefinition { (:) $1 $3 }
ListType :: { [Language.Lang.AbsGrammer.Type] }
ListType : Type { (:[]) $1 } | Type ',' ListType { (:) $1 $3 }
-- ListName is space-separated (no comma) and must be non-empty.
ListName :: { [Language.Lang.AbsGrammer.Name] }
ListName : Name { (:[]) $1 } | Name ListName { (:) $1 $2 }
Expression :: { Language.Lang.AbsGrammer.Expression }
Expression : Name { Language.Lang.AbsGrammer.ExpressionName $1 }
| NumericLiteral { Language.Lang.AbsGrammer.ExpressionLiteral $1 }
| Expression Expression { Language.Lang.AbsGrammer.ExpressionApplication $1 $2 }
| Expression '->' Expression { Language.Lang.AbsGrammer.ExpressionAbstraction $1 $3 }
| '{{' Module '}}' { Language.Lang.AbsGrammer.ExpressionImplicit $2 }
| '{' Module '}' { Language.Lang.AbsGrammer.ExpressionRecord $2 }
| '[' Module ']' { Language.Lang.AbsGrammer.ExpressionAlternative $2 }
| '(' Expression ')' { Language.Lang.AbsGrammer.ExpressionParens $2 }
{
-- Build the error message handed back through the Either monad,
-- distinguishing end-of-input, lexer errors, and ordinary mismatches.
happyError :: [Token] -> Either String a
happyError ts = Left $
"syntax error at " ++ tokenPos ts ++
case ts of
[] -> []
[Err _] -> " due to lexer error"
t:_ -> " before `" ++ (prToken t) ++ "'"
myLexer = tokens
}

View file

@ -0,0 +1,178 @@
{-# LANGUAGE CPP #-}
#if __GLASGOW_HASKELL__ <= 708
{-# LANGUAGE OverlappingInstances #-}
#endif
{-# LANGUAGE FlexibleInstances #-}
{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}
-- | Pretty-printer for Language.
-- Generated by the BNF converter.
module Language.Lang.PrintGrammer where
import qualified Language.Lang.AbsGrammer
import Data.Char
-- | Render any printable syntax-tree fragment as concrete syntax:
-- build its Doc at precedence 0, then run the layout pass.
printTree :: Print a => a -> String
printTree x = render (prt 0 x)
-- A Doc is a difference list of ShowS chunks: cheap to concatenate,
-- with spacing/indentation decided later by 'render'.
type Doc = [ShowS] -> [ShowS]
-- Lift a single string transformer into a one-chunk document.
doc :: ShowS -> Doc
doc = (:)
-- Layout pass: flatten the Doc into tokens, then walk them tracking the
-- current indentation level i (braces open/close a level; semicolons
-- break lines; closers and punctuation attach without a space).
render :: Doc -> String
render d = rend 0 (map ($ "") $ d []) "" where
rend i ss = case ss of
"[" :ts -> showChar '[' . rend i ts
"(" :ts -> showChar '(' . rend i ts
"{" :ts -> showChar '{' . new (i+1) . rend (i+1) ts
"}" : ";":ts -> new (i-1) . space "}" . showChar ';' . new (i-1) . rend (i-1) ts
"}" :ts -> new (i-1) . showChar '}' . new (i-1) . rend (i-1) ts
[";"] -> showChar ';'
";" :ts -> showChar ';' . new i . rend i ts
t : ts@(p:_) | closingOrPunctuation p -> showString t . rend i ts
t :ts -> space t . rend i ts
_ -> id
-- Start a new line indented two spaces per level, eating leading blanks.
new i = showChar '\n' . replicateS (2*i) (showChar ' ') . dropWhile isSpace
-- Emit token t followed by a single separating space unless t is blank,
-- at end of output, or a space already follows.
space t s =
case (all isSpace t', null spc, null rest) of
(True , _ , True ) -> [] -- remove trailing space
(False, _ , True ) -> t' -- remove trailing space
(False, True, False) -> t' ++ ' ' : s -- add space if none
_ -> t' ++ s
where
t' = showString t []
(spc, rest) = span isSpace s
-- True exactly for a one-character token that is a closing bracket or
-- punctuation; such tokens attach to the preceding text with no space.
closingOrPunctuation :: String -> Bool
closingOrPunctuation token = case token of
  [c] -> c `elem` closerOrPunct
  _   -> False

-- The characters treated as closers or punctuation.
closerOrPunct :: String
closerOrPunct = ")],;"
-- Surround a document with parentheses.
parenth :: Doc -> Doc
parenth body = doc (showChar '(') . body . doc (showChar ')')

-- Fuse a list of string transformers into one, left to right.
concatS :: [ShowS] -> ShowS
concatS = foldr (.) id

-- Fuse a list of documents into one, left to right.
concatD :: [Doc] -> Doc
concatD = foldr (.) id

-- Apply a string transformer a given number of times in sequence.
replicateS :: Int -> ShowS -> ShowS
replicateS count f = concatS (replicate count f)
-- | The printer class does the job.
-- prt renders one value at a precedence level; prtList renders a list,
-- defaulting to plain concatenation of the elements.
class Print a where
prt :: Int -> a -> Doc
prtList :: Int -> [a] -> Doc
prtList i = concatD . map (prt i)
-- Fallback list instance; element-specific instances below override it.
instance {-# OVERLAPPABLE #-} Print a => Print [a] where
prt = prtList
-- Chars print as single-quoted literals; Strings (via prtList) as
-- double-quoted literals, both escaped through mkEsc.
instance Print Char where
prt _ s = doc (showChar '\'' . mkEsc '\'' s . showChar '\'')
prtList _ s = doc (showChar '"' . concatS (map (mkEsc '"') s) . showChar '"')
-- Escape one character for a literal quoted by q: the quote itself,
-- backslash, newline and tab get a backslash escape; everything else
-- passes through unchanged.
mkEsc :: Char -> Char -> ShowS
mkEsc q c
  | c == q    = showChar '\\' . showChar c
  | c == '\\' = showString "\\\\"
  | c == '\n' = showString "\\n"
  | c == '\t' = showString "\\t"
  | otherwise = showChar c
-- Parenthesize a document whose own precedence is lower than the
-- context precedence; otherwise leave it untouched.
prPrec :: Int -> Int -> Doc -> Doc
prPrec context level
  | level < context = parenth
  | otherwise       = id
-- Numeric types are printed with the standard 'shows'.
instance Print Integer where
prt _ x = doc (shows x)
instance Print Double where
prt _ x = doc (shows x)
-- Print instances for the abstract syntax.  Each constructor is
-- linearized back to the concrete tokens of the grammar; binding,
-- declaration, definition and type lists are comma-separated, name
-- lists are space-separated.
instance Print Language.Lang.AbsGrammer.Name where
prt _ (Language.Lang.AbsGrammer.Name i) = doc $ showString $ i
-- NOTE(review): no [] case for prtList here or for Type below — the
-- module relies on -fno-warn-incomplete-patterns, and an empty list
-- would crash at runtime.  Generated code; confirm before relying on it.
prtList _ [x] = concatD [prt 0 x, doc (showString " ")]
prtList _ (x:xs) = concatD [prt 0 x, doc (showString " "), prt 0 xs]
instance Print Language.Lang.AbsGrammer.NumericLiteral where
prt _ (Language.Lang.AbsGrammer.NumericLiteral i) = doc $ showString $ i
instance Print Language.Lang.AbsGrammer.Module where
prt i e = case e of
Language.Lang.AbsGrammer.Module bindings -> prPrec i 0 (concatD [prt 0 bindings])
instance Print Language.Lang.AbsGrammer.Binding where
prt i e = case e of
Language.Lang.AbsGrammer.BindingDeclaration declaration -> prPrec i 0 (concatD [prt 0 declaration])
Language.Lang.AbsGrammer.BindingDefinition definition -> prPrec i 0 (concatD [prt 0 definition])
Language.Lang.AbsGrammer.BindingPragma pragma -> prPrec i 0 (concatD [doc (showString "{#"), prt 0 pragma, doc (showString "#}")])
prtList _ [] = concatD []
prtList _ [x] = concatD [prt 0 x]
prtList _ (x:xs) = concatD [prt 0 x, doc (showString ","), prt 0 xs]
instance Print Language.Lang.AbsGrammer.Declaration where
prt i e = case e of
Language.Lang.AbsGrammer.DeclarationNamed name type_ -> prPrec i 0 (concatD [prt 0 name, doc (showString ":"), prt 0 type_])
Language.Lang.AbsGrammer.DeclarationAnonymous type_ -> prPrec i 0 (concatD [prt 0 type_])
prtList _ [] = concatD []
prtList _ [x] = concatD [prt 0 x]
prtList _ (x:xs) = concatD [prt 0 x, doc (showString ","), prt 0 xs]
instance Print Language.Lang.AbsGrammer.Definition where
prt i e = case e of
Language.Lang.AbsGrammer.DefinitionNamed name expression -> prPrec i 0 (concatD [prt 0 name, doc (showString "="), prt 0 expression])
Language.Lang.AbsGrammer.DefinitionAnonymous expression -> prPrec i 0 (concatD [prt 0 expression])
prtList _ [] = concatD []
prtList _ [x] = concatD [prt 0 x]
prtList _ (x:xs) = concatD [prt 0 x, doc (showString ","), prt 0 xs]
instance Print Language.Lang.AbsGrammer.Type where
prt i e = case e of
Language.Lang.AbsGrammer.TypeName name -> prPrec i 0 (concatD [prt 0 name])
Language.Lang.AbsGrammer.TypeApplication type_1 type_2 -> prPrec i 0 (concatD [prt 0 type_1, prt 0 type_2])
Language.Lang.AbsGrammer.TypeAbstraction type_1 type_2 -> prPrec i 0 (concatD [prt 0 type_1, doc (showString "->"), prt 0 type_2])
Language.Lang.AbsGrammer.TypeImplicit declarations -> prPrec i 0 (concatD [doc (showString "{{"), prt 0 declarations, doc (showString "}}")])
Language.Lang.AbsGrammer.TypeRecord declarations -> prPrec i 0 (concatD [doc (showString "{"), prt 0 declarations, doc (showString "}")])
Language.Lang.AbsGrammer.TypeAlternative declarations -> prPrec i 0 (concatD [doc (showString "["), prt 0 declarations, doc (showString "]")])
Language.Lang.AbsGrammer.TypeParens type_ -> prPrec i 0 (concatD [doc (showString "("), prt 0 type_, doc (showString ")")])
prtList _ [x] = concatD [prt 0 x]
prtList _ (x:xs) = concatD [prt 0 x, doc (showString ","), prt 0 xs]
instance Print Language.Lang.AbsGrammer.Pragma where
prt i e = case e of
Language.Lang.AbsGrammer.Pragma names -> prPrec i 0 (concatD [prt 0 names])
-- Route the generic [a] instance through prtList for each AST type.
instance Print [Language.Lang.AbsGrammer.Binding] where
prt = prtList
instance Print [Language.Lang.AbsGrammer.Declaration] where
prt = prtList
instance Print [Language.Lang.AbsGrammer.Definition] where
prt = prtList
instance Print [Language.Lang.AbsGrammer.Type] where
prt = prtList
instance Print [Language.Lang.AbsGrammer.Name] where
prt = prtList
instance Print Language.Lang.AbsGrammer.Expression where
prt i e = case e of
Language.Lang.AbsGrammer.ExpressionName name -> prPrec i 0 (concatD [prt 0 name])
Language.Lang.AbsGrammer.ExpressionLiteral numericliteral -> prPrec i 0 (concatD [prt 0 numericliteral])
Language.Lang.AbsGrammer.ExpressionApplication expression1 expression2 -> prPrec i 0 (concatD [prt 0 expression1, prt 0 expression2])
Language.Lang.AbsGrammer.ExpressionAbstraction expression1 expression2 -> prPrec i 0 (concatD [prt 0 expression1, doc (showString "->"), prt 0 expression2])
Language.Lang.AbsGrammer.ExpressionImplicit module_ -> prPrec i 0 (concatD [doc (showString "{{"), prt 0 module_, doc (showString "}}")])
Language.Lang.AbsGrammer.ExpressionRecord module_ -> prPrec i 0 (concatD [doc (showString "{"), prt 0 module_, doc (showString "}")])
Language.Lang.AbsGrammer.ExpressionAlternative module_ -> prPrec i 0 (concatD [doc (showString "["), prt 0 module_, doc (showString "]")])
Language.Lang.AbsGrammer.ExpressionParens expression -> prPrec i 0 (concatD [doc (showString "("), prt 0 expression, doc (showString ")")])

View file

@ -0,0 +1,57 @@
-- Haskell module generated by the BNF converter
module Language.Lang.SkelGrammer where
import qualified Language.Lang.AbsGrammer
-- Computations that may fail with an error message.
type Err = Either String
type Result = Err String

-- | Report an unhandled AST case, showing the offending value.
failure :: Show a => a -> Result
failure x = Left ("Undefined case: " ++ show x)
-- Skeleton case-analysis functions, one per AST type.  Every branch is
-- a 'failure' stub; copy a function and replace the branches you need
-- when writing a real traversal.
transName :: Language.Lang.AbsGrammer.Name -> Result
transName x = case x of
Language.Lang.AbsGrammer.Name string -> failure x
transNumericLiteral :: Language.Lang.AbsGrammer.NumericLiteral -> Result
transNumericLiteral x = case x of
Language.Lang.AbsGrammer.NumericLiteral string -> failure x
transModule :: Language.Lang.AbsGrammer.Module -> Result
transModule x = case x of
Language.Lang.AbsGrammer.Module bindings -> failure x
transBinding :: Language.Lang.AbsGrammer.Binding -> Result
transBinding x = case x of
Language.Lang.AbsGrammer.BindingDeclaration declaration -> failure x
Language.Lang.AbsGrammer.BindingDefinition definition -> failure x
Language.Lang.AbsGrammer.BindingPragma pragma -> failure x
transDeclaration :: Language.Lang.AbsGrammer.Declaration -> Result
transDeclaration x = case x of
Language.Lang.AbsGrammer.DeclarationNamed name type_ -> failure x
Language.Lang.AbsGrammer.DeclarationAnonymous type_ -> failure x
transDefinition :: Language.Lang.AbsGrammer.Definition -> Result
transDefinition x = case x of
Language.Lang.AbsGrammer.DefinitionNamed name expression -> failure x
Language.Lang.AbsGrammer.DefinitionAnonymous expression -> failure x
transType :: Language.Lang.AbsGrammer.Type -> Result
transType x = case x of
Language.Lang.AbsGrammer.TypeName name -> failure x
Language.Lang.AbsGrammer.TypeApplication type_1 type_2 -> failure x
Language.Lang.AbsGrammer.TypeAbstraction type_1 type_2 -> failure x
Language.Lang.AbsGrammer.TypeImplicit declarations -> failure x
Language.Lang.AbsGrammer.TypeRecord declarations -> failure x
Language.Lang.AbsGrammer.TypeAlternative declarations -> failure x
Language.Lang.AbsGrammer.TypeParens type_ -> failure x
transPragma :: Language.Lang.AbsGrammer.Pragma -> Result
transPragma x = case x of
Language.Lang.AbsGrammer.Pragma names -> failure x
transExpression :: Language.Lang.AbsGrammer.Expression -> Result
transExpression x = case x of
Language.Lang.AbsGrammer.ExpressionName name -> failure x
Language.Lang.AbsGrammer.ExpressionLiteral numericliteral -> failure x
Language.Lang.AbsGrammer.ExpressionApplication expression1 expression2 -> failure x
Language.Lang.AbsGrammer.ExpressionAbstraction expression1 expression2 -> failure x
Language.Lang.AbsGrammer.ExpressionImplicit module_ -> failure x
Language.Lang.AbsGrammer.ExpressionRecord module_ -> failure x
Language.Lang.AbsGrammer.ExpressionAlternative module_ -> failure x
Language.Lang.AbsGrammer.ExpressionParens expression -> failure x

View file

@ -0,0 +1,70 @@
-- Program to test parser, automatically generated by BNF Converter.
module Main where
import System.Environment ( getArgs, getProgName )
import System.Exit ( exitFailure, exitSuccess )
import Control.Monad ( when )
import Language.Lang.LexGrammer ( Token )
import Language.Lang.ParGrammer ( pModule, myLexer )
import Language.Lang.SkelGrammer ()
import Language.Lang.PrintGrammer ( Print, printTree )
import Language.Lang.AbsGrammer ()
-- Parse results: Left carries the error message.
type Err = Either String
-- A parse function maps a token stream to a result.
type ParseFun a = [Token] -> Err a
-- Local alias for the generated lexer.
myLLexer = myLexer
-- Verbosity level; diagnostic output is printed only when > 1 (putStrV).
type Verbosity = Int
-- Print a line only when the verbosity level is above 1.
putStrV :: Verbosity -> String -> IO ()
putStrV verbosity msg = when (verbosity > 1) (putStrLn msg)
-- Echo the file name, then parse the file's contents with 'run'.
runFile :: (Print a, Show a) => Verbosity -> ParseFun a -> FilePath -> IO ()
runFile v p f = putStrLn f >> readFile f >>= run v p
-- Lex the input string and run the parse function on it.  On failure,
-- print the token stream (at verbosity > 1) and the error message and
-- exit non-zero; on success, show the tree and exit zero.
-- Fix: the error pattern is bound as 'err' — the original bound it as
-- 's', shadowing the input-string parameter (a -Wname-shadowing trap).
run :: (Print a, Show a) => Verbosity -> ParseFun a -> String -> IO ()
run v p s = case p ts of
  Left err -> do
    putStrLn "\nParse Failed...\n"
    putStrV v "Tokens:"
    putStrV v $ show ts
    putStrLn err
    exitFailure
  Right tree -> do
    putStrLn "\nParse Successful!"
    showTree v tree
    exitSuccess
  where
  ts = myLLexer s
-- Print the abstract syntax (via Show) and the linearized concrete
-- syntax (via printTree), both gated by verbosity v.
showTree :: (Show a, Print a) => Int -> a -> IO ()
showTree v tree
= do
putStrV v $ "\n[Abstract Syntax]\n\n" ++ show tree
putStrV v $ "\n[Linearized tree]\n\n" ++ printTree tree
-- Print the command-line help text and exit with a failure status.
usage :: IO ()
usage = do
putStrLn $ unlines
[ "usage: Call with one of the following argument combinations:"
, " --help Display this help message."
, " (no arguments) Parse stdin verbosely."
, " (files) Parse content of files verbosely."
, " -s (files) Silent mode. Parse content of files silently."
]
exitFailure
-- Dispatch on the command line: --help, stdin (verbose), -s for silent
-- file parsing, or a list of files parsed verbosely.
main :: IO ()
main = do
args <- getArgs
case args of
["--help"] -> usage
[] -> getContents >>= run 2 pModule
"-s":fs -> mapM_ (runFile 0 pModule) fs
fs -> mapM_ (runFile 2 pModule) fs

31
src/Makefile Normal file
View file

@ -0,0 +1,31 @@
# Makefile generated by BNFC.
# List of goals not corresponding to file names.
.PHONY : all clean distclean
# Default goal.
all : Language/Lang/TestGrammer
# Rules for building the parser.
# Happy grammar (.y) -> GHC-targeted, array-backed parser with .info log.
%.hs : %.y
happy --ghc --coerce --array --info $<
# Alex lexer (.x) -> GHC-targeted lexer.
%.hs : %.x
alex --ghc $<
# Link the test driver against the generated modules.
Language/Lang/TestGrammer : Language/Lang/TestGrammer.hs Language/Lang/ErrM.hs Language/Lang/LexGrammer.hs Language/Lang/ParGrammer.hs Language/Lang/PrintGrammer.hs
ghc --make $< -o $@
# Rules for cleaning generated files.
clean :
-rm -f Language/Lang/*.hi Language/Lang/*.o Language/Lang/*.log Language/Lang/*.aux Language/Lang/*.dvi
# distclean removes everything BNFC can generate (leading '-' ignores
# errors for files that were never produced).
distclean : clean
-rm -f Language/Lang/AbsGrammer.hs Language/Lang/AbsGrammer.hs.bak Language/Lang/ComposOp.hs Language/Lang/ComposOp.hs.bak Language/Lang/DocGrammer.txt Language/Lang/DocGrammer.txt.bak Language/Lang/ErrM.hs Language/Lang/ErrM.hs.bak Language/Lang/LayoutGrammer.hs Language/Lang/LayoutGrammer.hs.bak Language/Lang/LexGrammer.x Language/Lang/LexGrammer.x.bak Language/Lang/ParGrammer.y Language/Lang/ParGrammer.y.bak Language/Lang/PrintGrammer.hs Language/Lang/PrintGrammer.hs.bak Language/Lang/SharedString.hs Language/Lang/SharedString.hs.bak Language/Lang/SkelGrammer.hs Language/Lang/SkelGrammer.hs.bak Language/Lang/TestGrammer.hs Language/Lang/TestGrammer.hs.bak Language/Lang/XMLGrammer.hs Language/Lang/XMLGrammer.hs.bak Language/Lang/ASTGrammer.agda Language/Lang/ASTGrammer.agda.bak Language/Lang/ParserGrammer.agda Language/Lang/ParserGrammer.agda.bak Language/Lang/IOLib.agda Language/Lang/IOLib.agda.bak Language/Lang/Main.agda Language/Lang/Main.agda.bak Language/Lang/grammer.dtd Language/Lang/grammer.dtd.bak Language/Lang/TestGrammer Language/Lang/LexGrammer.hs Language/Lang/ParGrammer.hs Language/Lang/ParGrammer.info Language/Lang/ParDataGrammer.hs Makefile
-rmdir -p Language/Lang/
# EOF