import string
from scanner import Scanner
from parser import Parser

################################################################################
# Predicates
################################################################################

def is_alpha(c):
  return c in string.ascii_letters

def is_digit(c):
  return c in "0123456789"

def is_alphanumeric(c):
  return is_alpha(c) or is_digit(c)

def is_whitespace(c):
  return c in " \r\t\n"

################################################################################
# Tokenizer
################################################################################
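
# Tokens are modeled as plain (TYPE, LEXEME) tuples, e.g. ("STRING", "foo") or
# the punctuation constants below, so they compare with == and need no class
# machinery.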

AND    = ("CONJUNCTION", "AND")
OR     = ("CONJUNCTION", "OR")
NOT    = ("PUNCTUATION", "NOT")
COLON  = ("PUNCTUATION", "COLON")
LPAREN = ("PUNCTUATION", "LPAREN")
RPAREN = ("PUNCTUATION", "RPAREN")

def tokenize(x):
  s = Scanner(x)
  tokens = scan_tokens(s)
  return tokens

def scan_tokens(s):
  result = []
  while not s.exhausted():
    if is_whitespace(s.peek()):
      s.advance()
    else:
      result.append(scan_token(s))
  return result

def scan_token(s):
  punctuation = {
      "-": NOT,
      ":": COLON,
      "(": LPAREN,
      ")": RPAREN,
  }
  c = s.peek()
  if c in punctuation:
    s.advance()
    return punctuation[c]
  if c == "\"":
    return tokenize_string(s)
  if c == "/":
    return tokenize_regex(s)
  if is_alpha(c):
    return tokenize_identifier(s)
  # Fail loudly on anything we don't recognize; falling through would return
  # None without consuming the character, sending scan_tokens into an
  # infinite loop.
  raise Exception("Unexpected character: \"{}\"".format(c))

def tokenize_string(s):
  s.advance() # ignore the opening double-quote
  current = ""
  while not s.exhausted() and s.peek() != "\"":
    current += s.advance()
  if s.exhausted():
    raise Exception("Unterminated string")
  s.advance() # ignore the closing double-quote
  return ("STRING", current)

def tokenize_regex(s):
  s.advance() # ignore the opening forward-slash
  current = ""
  while not s.exhausted() and s.peek() != "/":
    current += s.advance()
  if s.exhausted():
    raise Exception("Unterminated regex")
  s.advance() # ignore the closing forward-slash
  return ("REGEX", current)

def tokenize_identifier(s):
  conjunctions = {
      "AND",
      "OR",
  }
  current = s.advance()
  # Check for exhaustion first so an identifier at the very end of the input
  # doesn't read past the end of the scanner.
  while not s.exhausted() and is_alphanumeric(s.peek()):
    current += s.advance()
  if current.upper() in conjunctions:
    return ("CONJUNCTION", current.upper())
  else:
    return ("IDENTIFIER", current)

################################################################################
# Parser
################################################################################

# EBNF
# Note: we order expression types by ascending levels of precedence.
#
# expression  -> conjunction ;
# conjunction -> selection ( ( "AND" | "OR" )? selection )* ;
# selection   -> "-"? IDENTIFIER ":" ( REGEX | STRING ) | grouping ;
# grouping    -> REGEX | STRING | "(" expression ")" ;
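#
# For example, the query
#
#   author:/wpcarro/ AND title:"main"
#
# derives as conjunction(selection "AND" selection), which parse() below
# returns as the nested tuple:
#
#   ("CONJUNCTION", "AND",
#    ("SELECTION", False, "author", ("REGEX", "wpcarro")),
#    ("SELECTION", False, "title", ("STRING", "main")))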

def parse(x):
  tokens = tokenize(x)
  p = Parser(tokens)
  result = expression(p)
  # Only the top-level call may insist that every token was consumed; checking
  # this inside conjunction() would wrongly reject inputs like ("foo" OR /bar/),
  # where an RPAREN legitimately remains after the nested expression.
  if not p.exhausted():
    raise Exception("Encountered more tokens than we can parse: \"{}\"".format(p.tokens[p.i:]))
  return result

def expression(p):
  return conjunction(p)

def conjunction(p):
  lhs = selection(p)

  # Keep folding selections into a left-nested tree until we hit an RPAREN
  # (which closes an enclosing grouping) or run out of tokens. An LPAREN may
  # begin the next selection, so it must not end the loop.
  while not p.exhausted() and p.test(lambda tokens, i: tokens[i] != RPAREN):
    conj = p.advance() if p.peek()[0] == "CONJUNCTION" else AND
    rhs = selection(p)
    lhs = ("CONJUNCTION", conj[1], lhs, rhs)

  return lhs
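
# When an explicit conjunction is omitted, "AND" is implied, so these two
# queries produce the same left-nested tree:
#
#   parse('foo:"a" bar:"b"')
#   parse('foo:"a" AND bar:"b"')
#   # => ("CONJUNCTION", "AND",
#   #     ("SELECTION", False, "foo", ("STRING", "a")),
#   #     ("SELECTION", False, "bar", ("STRING", "b")))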

def selection(p):
  negate = False
  if p.peek() == NOT:
    negate = True
    p.advance()

  if p.peek()[0] != "IDENTIFIER":
    # Per the grammar, "-" may only prefix a KEY:VALUE selection, so reject a
    # dangling negation instead of silently dropping it.
    if negate:
      raise Exception("Expected an identifier after \"-\" but got: \"{}\"".format(p.peek()))
    return grouping(p)

  ident = p.expect(lambda x: x[0] == "IDENTIFIER")
  p.expect(lambda x: x == COLON)
  value = p.expect(lambda x: x[0] in {"REGEX", "STRING"})
  return ("SELECTION", negate, ident[1], value)

def grouping(p):
  if p.peek()[0] == "REGEX":
    return p.advance()

  if p.peek()[0] == "STRING":
    return p.advance()

  if p.peek() == LPAREN:
    p.advance()
    expr = expression(p)
    p.expect(lambda x: x == RPAREN)
    return ("GROUPING", expr)

  # Fail loudly instead of falling through and returning None, which would
  # silently corrupt the resulting AST.
  raise Exception("Expected a regex, string, or \"(\" but got: \"{}\"".format(p.peek()))

################################################################################
# Main
################################################################################
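
# A sample session with the REPL defined below (output wrapped here for
# readability; Python prints each tuple on a single line):
#
#   > -lang:"en" (title:/^Simple/ OR title:"select")
#   ('CONJUNCTION', 'AND',
#    ('SELECTION', True, 'lang', ('STRING', 'en')),
#    ('GROUPING', ('CONJUNCTION', 'OR',
#                  ('SELECTION', False, 'title', ('REGEX', '^Simple')),
#                  ('SELECTION', False, 'title', ('STRING', 'select')))))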

def main():
  while True:
    try:
      x = input("> ")
    except EOFError:
      print()  # exit cleanly on Ctrl-D instead of crashing with a traceback
      break
    print(parse(x))

if __name__ == "__main__":
  main()