Mirror of https://github.com/Ed94/LangStudies.git (synced 2025-06-14 19:11:46 -07:00)
Made a UI for results, BAPFS - Lecture 4, 5, and 6 done.
61  Editor/Lectures/Lecture.1.gd  Normal file
@@ -0,0 +1,61 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem
# it necessary, there will be heavy refactors.

class SyntaxNode:
	var Type  : String
	var Value : int

	func Dictionary():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class LetterParser:
	var Str : String

	# NumericLiteral
	#	: NUMBER
	#	;
	#
	func NumericLiteral():
		var \
		node       = SyntaxNode.new()
		node.Type  = "NumericLiteral"
		node.Value = int(self.Str)

		return node

	# Parses the text program description into an AST.
	func Parse(programDescription):
		self.Str = programDescription

		return NumericLiteral()


var ProgramDescription = "7"
var LParser            = LetterParser.new()

# Note: _ready is being used for the Program func of the lectures.
# Main entry point.
#
# Program
#	: NumericLiteral
#	;
#
func _ready():
	var ast = LParser.Parse(ProgramDescription)

	print(to_json(ast.Dictionary()))


# Called every frame. 'delta' is the elapsed time since the previous frame.
#func _process(delta):
#	pass
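For reference, running this scene should print the AST as JSON, roughly as follows (a sketch; exact to_json spacing depends on the Godot 3 version):

	{"Type":"NumericLiteral","Value":7}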
210  Editor/Lectures/Lecture.2.gd  Normal file
@@ -0,0 +1,210 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem
# it necessary, there will be heavy refactors.

enum TokenTypes \
{
	Token_Number,
	Token_String
}

const StrTokenTypes = \
{
	Token_Number = "Number",
	Token_String = "String"
}

class Token:
	var Type  : String
	var Value : String

	func toDict():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Tokenizer:
	var SrcTxt : String
	var Cursor : int

	# Sets up the tokenizer with the program source text.
	func init(programSrcText):
		SrcTxt = programSrcText
		Cursor = 0

	# Provides the next token in the source text.
	func next_Token():
		if self.reached_EndOfTxt() == true :
			return null

		var token = self.SrcTxt.substr(Cursor)

		# Numbers
		if token[self.Cursor].is_valid_integer() :
			var \
			numberTok       = Token.new()
			numberTok.Type  = "Number"
			numberTok.Value = ""

			while token.length() > self.Cursor && token[self.Cursor].is_valid_integer() :
				numberTok.Value += token[self.Cursor]
				self.Cursor     += 1

			return numberTok

		# String:
		if token[self.Cursor] == '"' :
			var \
			stringTok       = Token.new()
			stringTok.Type  = "String"
			stringTok.Value = "\""

			self.Cursor += 1

			while token.length() > self.Cursor :
				stringTok.Value += token[self.Cursor]
				self.Cursor     += 1

			return stringTok

		return null

	func reached_EndOfTxt():
		return self.Cursor >= ( self.SrcTxt.length() - 1 )
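# Note: next_Token() takes the substring from Cursor but then also indexes that
# substring with self.Cursor. The offsets only line up because each test below
# re-initializes the tokenizer and each program is a single literal; the
# regex-driven tokenizer in Lecture 3 replaces this manual scanning.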
var GTokenizer = Tokenizer.new()


class SyntaxNode:
	var Type : String
	var Value # Not specifying a type implicitly declares a Variant type.

	func toDict():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class ProgramNode:
	var Type : String
	var Body : Object

	func toDict():
		var result = \
		{
			Type = self.Type,
			Body = self.Body.toDict()
		}
		return result

class Parser:
	var TokenizerRef : Tokenizer
	var NextToken    : Token

	func eat(tokenType):
		var currToken = self.NextToken

		assert(currToken != null, "eat: NextToken was null")

		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
		var assertStr      = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})

		assert(currToken.Type == tokenType, assertStr)

		self.NextToken = self.TokenizerRef.next_Token()

		return currToken

	# Literal
	#	: NumericLiteral
	#	: StringLiteral
	#	;
	func parse_Literal():
		match self.NextToken.Type :
			"Number":
				return parse_NumericLiteral()
			"String":
				return parse_StringLiteral()

		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")

	# NumericLiteral
	#	: Number
	#	;
	#
	func parse_NumericLiteral():
		var Token = self.eat("Number")
		var \
		node       = SyntaxNode.new()
		node.Type  = "NumericLiteral"
		node.Value = int( Token.Value )

		return node

	# StringLiteral
	#	: String
	#	;
	#
	func parse_StringLiteral():
		var Token = self.eat("String")
		var \
		node       = SyntaxNode.new()
		node.Type  = "StringLiteral"
		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )

		return node

	# Program
	#	: Literal
	#	;
	#
	func parse_Program():
		var \
		node      = ProgramNode.new()
		node.Type = "Program"
		node.Body = parse_Literal()

		return node

	# Parses the text program description into an AST.
	func parse(TokenizerRef):
		self.TokenizerRef = TokenizerRef

		NextToken = TokenizerRef.next_Token()

		return parse_Program()

var GParser = Parser.new()



# Main Entry point.
func _ready():
	# Numerical test
	var ProgramDescription = "47"
	GTokenizer.init(ProgramDescription)

	var ast = GParser.parse(GTokenizer)
	print(JSON.print(ast.toDict(), "\t"))

	# String Test
	ProgramDescription = "\"hello\""
	GTokenizer.init(ProgramDescription)

	ast = GParser.parse(GTokenizer)
	print(JSON.print(ast.toDict(), "\t"))


# Called every frame. 'delta' is the elapsed time since the previous frame.
#func _process(delta):
#	pass
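For reference, the numerical test above should print roughly the following (a sketch; JSON.print with a tab indent, keys in insertion order):

	{
		"Type": "Program",
		"Body": {
			"Type": "NumericLiteral",
			"Value": 47
		}
	}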
264  Editor/Lectures/Lecture.3..gd  Normal file
@@ -0,0 +1,264 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem
# it necessary, there will be heavy refactors.

const TokenType = \
{
	Program = "Program",

	# Comments
	CommentLine      = "CommentLine",
	CommentMultiLine = "CommentMultiLine",

	# Formatting
	Whitespace = "Whitespace",

	# Literals
	Number = "Number",
	String = "String"
}

const TokenSpec = \
{
	TokenType.CommentLine      : "^\/\/.*",
	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
	TokenType.Whitespace       : "^\\s+",
	TokenType.Number           : "\\d+",
	TokenType.String           : "^\"[^\"]*\""
}

class Token:
	var Type  : String
	var Value : String

	func toDict():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Tokenizer:
	var SrcTxt : String
	var Cursor : int

	# Sets up the tokenizer with the program source text.
	func init(programSrcText):
		SrcTxt = programSrcText
		Cursor = 0

	# Provides the next token in the source text.
	func next_Token():
		if self.reached_EndOfTxt() == true :
			return null

		var srcLeft = self.SrcTxt.substr(Cursor)
		var regex   = RegEx.new()
		var token   = Token.new()

		for type in TokenSpec :
			regex.compile(TokenSpec[type])

			var result = regex.search(srcLeft)
			if result == null :
				continue

			# Skip Comments
			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
				self.Cursor += result.get_string().length()
				return next_Token()

			# Skip Whitespace
			if type == TokenType.Whitespace :
				var addVal   = result.get_string().length()
				self.Cursor += addVal

				return next_Token()

			token.Type   = type
			token.Value  = result.get_string()
			self.Cursor += ( result.get_string().length() - 1 )

			return token

		var assertStrTmplt = "next_token: Source text not understood by tokenizer at Cursor pos: {value}"
		var assertStr      = assertStrTmplt.format({"value" : self.Cursor})
		assert(false, assertStr)
		return null

	func reached_EndOfTxt():
		return self.Cursor >= ( self.SrcTxt.length() - 1 )
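# Note: advancing Cursor by length() - 1 and stopping one character early in
# reached_EndOfTxt() are off-by-one quirks of this lecture's version. Lecture 4
# anchors matches with a get_start() == 0 check and advances by the full match
# length; Lecture 5 also drops the - 1 in reached_EndOfTxt().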
var GTokenizer = Tokenizer.new()


class SyntaxNode:
	var Type : String
	var Value # Not specifying a type implicitly declares a Variant type.

	func toDict():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class ProgramNode:
	var Type : String
	var Body : Object

	func toDict():
		var result = \
		{
			Type = self.Type,
			Body = self.Body.toDict()
		}
		return result

class Parser:
	var TokenizerRef : Tokenizer
	var NextToken    : Token

	func eat(tokenType):
		var currToken = self.NextToken

		assert(currToken != null, "eat: NextToken was null")

		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
		var assertStr      = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})

		assert(currToken.Type == tokenType, assertStr)

		self.NextToken = self.TokenizerRef.next_Token()

		return currToken

	# Literal
	#	: NumericLiteral
	#	: StringLiteral
	#	;
	func parse_Literal():
		match self.NextToken.Type :
			TokenType.Number:
				return parse_NumericLiteral()
			TokenType.String:
				return parse_StringLiteral()

		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")

	# NumericLiteral
	#	: Number
	#	;
	#
	func parse_NumericLiteral():
		var Token = self.eat(TokenType.Number)
		var \
		node       = SyntaxNode.new()
		node.Type  = TokenType.Number
		node.Value = int( Token.Value )

		return node

	# StringLiteral
	#	: String
	#	;
	#
	func parse_StringLiteral():
		var Token = self.eat(TokenType.String)
		var \
		node       = SyntaxNode.new()
		node.Type  = TokenType.String
		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )

		return node

	# Program
	#	: Literal
	#	;
	#
	func parse_Program():
		var \
		node      = ProgramNode.new()
		node.Type = TokenType.Program
		node.Body = parse_Literal()

		return node

	# Parses the text program description into an AST.
	func parse(TokenizerRef):
		self.TokenizerRef = TokenizerRef

		NextToken = TokenizerRef.next_Token()

		return parse_Program()

var GParser = Parser.new()


var ProgramDescription : String

func test():
	GTokenizer.init(ProgramDescription)

	var ast = GParser.parse(GTokenizer)

	print(JSON.print(ast.toDict(), "\t"))


# Main Entry point.
func _ready():
	# Numerical test
	ProgramDescription = "47"
	test()

	# String Test
	ProgramDescription = "\"hello\""
	test()

	# Whitespace test
	ProgramDescription = " \"we got past whitespace\" "
	test()

	# Comment Single Test
	ProgramDescription = \
	"""
	// Testing a comment
	\"hello sir\"
	"""
	test()

	# Comment Multi-Line Test
	ProgramDescription = \
	"""
	/**
	 *
	 * Testing a comment
	 */
	\"may I have some grapes\"
	"""
	test()

	# Multi-statement test
	ProgramDescription = \
	"""
	// Testing a comment
	\"hello sir\";

	/**
	 *
	 * Testing a comment
	 */
	\"may I have some grapes\";
	"""
	test()

# Called every frame. 'delta' is the elapsed time since the previous frame.
#func _process(delta):
#	pass
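A minimal usage sketch of the spec-driven tokenizer above, assuming the classes are in scope:

	GTokenizer.init("\"hi\"")
	var tok = GTokenizer.next_Token() # tok.Type == TokenType.String, tok.Value == "\"hi\""
	# parse_StringLiteral then strips the surrounding quotes with substr(1, length - 2).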
307  Editor/Lectures/Lecture.4.gd  Normal file
@@ -0,0 +1,307 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem
# it necessary, there will be heavy refactors.

const TokenType = \
{
	Program = "Program",

	# Comments
	CommentLine      = "CommentLine",
	CommentMultiLine = "CommentMultiLine",

	# Formatting
	Whitespace = "Whitespace",

	# Statements
	StatementEnd = "StatementEnd",

	# Literals
	Number = "Number",
	String = "String"
}

const TokenSpec = \
{
	TokenType.CommentLine      : "^\/\/.*",
	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
	TokenType.Whitespace       : "^\\s+",
	TokenType.Number           : "\\d+",
	TokenType.String           : "^\"[^\"]*\"",
	TokenType.StatementEnd     : "^;"
}

class Token:
	var Type  : String
	var Value : String

	func toDict():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Tokenizer:
	var SrcTxt : String
	var Cursor : int

	# Sets up the tokenizer with the program source text.
	func init(programSrcText):
		SrcTxt = programSrcText
		Cursor = 0

	# Provides the next token in the source text.
	func next_Token():
		if self.reached_EndOfTxt() == true :
			return null

		var srcLeft = self.SrcTxt.substr(Cursor)
		var regex   = RegEx.new()
		var token   = Token.new()

		for type in TokenSpec :
			regex.compile(TokenSpec[type])

			var result = regex.search(srcLeft)
			if result == null || result.get_start() != 0 :
				continue

			# Skip Comments
			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
				self.Cursor += result.get_string().length()
				return next_Token()

			# Skip Whitespace
			if type == TokenType.Whitespace :
				var addVal   = result.get_string().length()
				self.Cursor += addVal

				return next_Token()

			token.Type   = type
			token.Value  = result.get_string()
			self.Cursor += ( result.get_string().length() )

			return token

		var assertStrTmplt = "next_token: Source text not understood by tokenizer at Cursor pos: {value}"
		var assertStr      = assertStrTmplt.format({"value" : self.Cursor})
		assert(false, assertStr)
		return null

	func reached_EndOfTxt():
		return self.Cursor >= ( self.SrcTxt.length() - 1 )

var GTokenizer = Tokenizer.new()


const SyntaxNodeType = \
{
	NumericLiteral      = "NumericLiteral",
	StringLiteral       = "StringLiteral",
	ExpressionStatement = "ExpressionStatement"
}

class SyntaxNode:
	var Type : String
	var Value # Not specifying a type implicitly declares a Variant type.

	func toDict():
		var ValueDict = self.Value
		if typeof(Value) == TYPE_ARRAY :
			var dict  = {}
			var index = 0
			for entry in self.Value :
				dict[index] = entry.toDict()
				index      += 1

			ValueDict = dict

		var result = \
		{
			Type  = self.Type,
			Value = ValueDict
		}
		return result

class ProgramNode:
	var Type : String
	var Body : Object

	func toDict():
		var result = \
		{
			Type = self.Type,
			Body = self.Body.toDict()
		}
		return result

class Parser:
	var TokenizerRef : Tokenizer
	var NextToken    : Token

	func eat(tokenType):
		var currToken = self.NextToken

		assert(currToken != null, "eat: NextToken was null")

		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
		var assertStr      = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})

		assert(currToken.Type == tokenType, assertStr)

		self.NextToken = self.TokenizerRef.next_Token()

		return currToken

	# Literal
	#	: NumericLiteral
	#	: StringLiteral
	#	;
	#
	func parse_Literal():
		match NextToken.Type :
			TokenType.Number:
				return parse_NumericLiteral()
			TokenType.String:
				return parse_StringLiteral()

		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")

	# NumericLiteral
	#	: Number
	#	;
	#
	func parse_NumericLiteral():
		var Token = eat(TokenType.Number)
		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.NumericLiteral
		node.Value = int( Token.Value )

		return node

	# StringLiteral
	#	: String
	#	;
	#
	func parse_StringLiteral():
		var Token = eat(TokenType.String)
		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.StringLiteral
		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )

		return node

	# Expression
	#	: Literal
	#	;
	#
	func parse_Expression():
		return parse_Literal()

	# ExpressionStatement
	#	: Expression
	#	;
	#
	func parse_ExpressionStatement():
		var expression = parse_Expression()
		eat(TokenType.StatementEnd)

		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.ExpressionStatement
		node.Value = expression

		return expression
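	# Note: the ExpressionStatement wrapper above is constructed but the bare
	# expression is returned, so the wrapper never reaches the AST. This
	# lecture's toDict() cannot serialize Object-valued nodes yet; Lecture 5
	# adds that handling and returns the wrapper node.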
	# Statement
	#	: ExpressionStatement
	#	;
	#
	func parse_Statement():
		return parse_ExpressionStatement()

	# StatementList
	#	: Statement
	#	| StatementList Statement -> Statement ...
	#	;
	#
	func parse_StatementList():
		var statementList = [ parse_Statement() ]

		while NextToken != null :
			statementList.append( parse_Statement() )

		var \
		node       = SyntaxNode.new()
		node.Type  = "StatementList"
		node.Value = statementList

		return node

	# Program
	#	: StatementList
	#	: Literal
	#	;
	#
	func parse_Program():
		var \
		node      = ProgramNode.new()
		node.Type = TokenType.Program
		node.Body = parse_StatementList()

		return node

	# Parses the text program description into an AST.
	func parse(TokenizerRef):
		self.TokenizerRef = TokenizerRef

		NextToken = TokenizerRef.next_Token()

		return parse_Program()

var GParser = Parser.new()

const Tests = \
{
	MultiStatement = \
	{
		Name = "Multi-Statement",
		File = "1.Multi-Statement.uf"
	}
}

func test(entry):
	var introMessage          = "Testing: {Name}"
	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
	print(introMessageFormatted)

	var path          = "res://Tests/{TestName}"
	var pathFormatted = path.format({"TestName" : entry.File})

	var \
	file = File.new()
	file.open(pathFormatted, File.READ)

	var programDescription = file.get_as_text()
	file.close()

	GTokenizer.init(programDescription)
	var ast = GParser.parse(GTokenizer)

	var json = JSON.print(ast.toDict(), "\t")

	print(json)
	print("Passed!\n")


# Main Entry point.
func _ready():
	for Key in Tests :
		test(Tests[Key])
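The .uf test files under res://Tests are not part of this commit. Judging by Lecture 3's inline multi-statement test, 1.Multi-Statement.uf presumably holds something like the following (an assumption, not the actual file contents):

	// Testing a comment
	"hello sir";
	"may I have some grapes";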
373  Editor/Lectures/Lecture.5.gd  Normal file
@@ -0,0 +1,373 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem
# it necessary, there will be heavy refactors.

const TokenType = \
{
	Program = "Program",

	# Comments
	CommentLine      = "CommentLine",
	CommentMultiLine = "CommentMultiLine",

	# Formatting
	Whitespace = "Whitespace",

	# Statements
	StatementEnd   = "StatementEnd",
	StmtBlockStart = "BlockStatementStart",
	StmtBlockEnd   = "BlockStatementEnd",

	# Literals
	Number = "Number",
	String = "String"
}

const TokenSpec = \
{
	TokenType.CommentLine      : "^\/\/.*",
	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
	TokenType.Whitespace       : "^\\s+",
	TokenType.Number           : "\\d+",
	TokenType.String           : "^\"[^\"]*\"",
	TokenType.StatementEnd     : "^;",
	TokenType.StmtBlockStart   : "^{",
	TokenType.StmtBlockEnd     : "^}"
}

class Token:
	var Type  : String
	var Value : String

	func toDict():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Tokenizer:
	var SrcTxt : String
	var Cursor : int

	# Sets up the tokenizer with the program source text.
	func init(programSrcText):
		SrcTxt = programSrcText
		Cursor = 0

	# Provides the next token in the source text.
	func next_Token():
		if self.reached_EndOfTxt() == true :
			return null

		var srcLeft = self.SrcTxt.substr(Cursor)
		var regex   = RegEx.new()
		var token   = Token.new()

		for type in TokenSpec :
			regex.compile(TokenSpec[type])

			var result = regex.search(srcLeft)
			if result == null || result.get_start() != 0 :
				continue

			# Skip Comments
			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
				self.Cursor += result.get_string().length()
				return next_Token()

			# Skip Whitespace
			if type == TokenType.Whitespace :
				var addVal   = result.get_string().length()
				self.Cursor += addVal

				return next_Token()

			token.Type   = type
			token.Value  = result.get_string()
			self.Cursor += ( result.get_string().length() )

			return token

		var assertStrTmplt = "next_token: Source text not understood by tokenizer at Cursor pos: {value}"
		var assertStr      = assertStrTmplt.format({"value" : self.Cursor})
		assert(false, assertStr)
		return null

	func reached_EndOfTxt():
		return self.Cursor >= ( self.SrcTxt.length() )

var GTokenizer = Tokenizer.new()


const SyntaxNodeType = \
{
	NumericLiteral      = "NumericLiteral",
	StringLiteral       = "StringLiteral",
	ExpressionStatement = "ExpressionStatement",
	BlockStatement      = "BlockStatement",
	EmptyStatement      = "EmptyStatement"
}

class SyntaxNode:
	var Type : String
	var Value # Not specifying a type implicitly declares a Variant type.

	func toDict():
		var ValueDict = self.Value
		if typeof(Value) == TYPE_ARRAY :
			var dict  = {}
			var index = 0
			for entry in self.Value :
				dict[index] = entry.toDict()
				index      += 1

			ValueDict = dict

		if typeof(Value) == TYPE_OBJECT :
			var result = \
			{
				Type  = self.Type,
				Value = self.Value.toDict()
			}
			return result

		var result = \
		{
			Type  = self.Type,
			Value = ValueDict
		}
		return result

class ProgramNode:
	var Type : String
	var Body : Object

	func toDict():
		var result = \
		{
			Type = self.Type,
			Body = self.Body.toDict()
		}
		return result

class Parser:
	var TokenizerRef : Tokenizer
	var NextToken    : Token

	func eat(tokenType):
		var currToken = self.NextToken

		assert(currToken != null, "eat: NextToken was null")

		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
		var assertStr      = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})

		assert(currToken.Type == tokenType, assertStr)

		self.NextToken = self.TokenizerRef.next_Token()

		return currToken

	# Literal
	#	: NumericLiteral
	#	: StringLiteral
	#	;
	#
	func parse_Literal():
		match NextToken.Type :
			TokenType.Number:
				return parse_NumericLiteral()
			TokenType.String:
				return parse_StringLiteral()

		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")

	# NumericLiteral
	#	: Number
	#	;
	#
	func parse_NumericLiteral():
		var Token = eat(TokenType.Number)
		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.NumericLiteral
		node.Value = int( Token.Value )

		return node

	# StringLiteral
	#	: String
	#	;
	#
	func parse_StringLiteral():
		var Token = eat(TokenType.String)
		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.StringLiteral
		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )

		return node

	# Expression
	#	: Literal
	#	;
	#
	func parse_Expression():
		return parse_Literal()

	# EmptyStatement
	#	;
	#
	func parse_EmptyStatement():
		eat(TokenType.StatementEnd)

		var \
		node      = SyntaxNode.new()
		node.Type = SyntaxNodeType.EmptyStatement

		return node

	# BlockStatement
	#	: { OptStatementList }
	#	;
	#
	func parse_BlockStatement():
		eat(TokenType.StmtBlockStart)

		var \
		node      = SyntaxNode.new()
		node.Type = SyntaxNodeType.BlockStatement

		if NextToken.Type != TokenType.StmtBlockEnd :
			node.Value = parse_StatementList(TokenType.StmtBlockEnd)
		else :
			node.Value = []

		eat(TokenType.StmtBlockEnd)

		return node

	# ExpressionStatement
	#	: Expression
	#	;
	#
	func parse_ExpressionStatement():
		var expression = parse_Expression()
		eat(TokenType.StatementEnd)

		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.ExpressionStatement
		node.Value = expression

		return node

	# Statement
	#	: ExpressionStatement
	#	: BlockStatement
	#	: EmptyStatement
	#	;
	#
	func parse_Statement():
		match NextToken.Type :
			TokenType.StatementEnd :
				return parse_EmptyStatement()
			TokenType.StmtBlockStart :
				return parse_BlockStatement()

		return parse_ExpressionStatement()

	# StatementList
	#	: Statement
	#	| StatementList Statement -> Statement ...
	#	;
	#
	func parse_StatementList(endToken):
		var statementList = [ parse_Statement() ]

		while NextToken != null && NextToken.Type != endToken :
			statementList.append( parse_Statement() )

		var \
		node       = SyntaxNode.new()
		node.Type  = "StatementList"
		node.Value = statementList

		return node

	# Program
	#	: StatementList
	#	: Literal
	#	;
	#
	func parse_Program():
		var \
		node      = ProgramNode.new()
		node.Type = TokenType.Program
		node.Body = parse_StatementList(null)

		return node

	# Parses the text program description into an AST.
	func parse(TokenizerRef):
		self.TokenizerRef = TokenizerRef

		NextToken = TokenizerRef.next_Token()

		return parse_Program()

var GParser = Parser.new()



onready var TextOut = GScene.get_node("TextOutput")
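# Note: GScene is not declared in this script; it is presumably an autoload or
# global scene reference defined elsewhere in the project. If the script were
# attached to the scene root, a plain get_node("TextOutput") would also work.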
func tout(text):
	TextOut.insert_text_at_cursor(text)

const Tests = \
{
	MultiStatement = \
	{
		Name = "Multi-Statement",
		File = "1.Multi-Statement.uf"
	},
	BlockStatement = \
	{
		Name = "Block Statement",
		File = "2.BlockStatement.uf"
	}
}

func test(entry):
	var introMessage          = "Testing: {Name}\n"
	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
	tout(introMessageFormatted)

	var path          = "res://Tests/{TestName}"
	var pathFormatted = path.format({"TestName" : entry.File})

	var \
	file = File.new()
	file.open(pathFormatted, File.READ)

	var programDescription = file.get_as_text()
	file.close()

	GTokenizer.init(programDescription)
	var ast = GParser.parse(GTokenizer)

	var json = JSON.print(ast.toDict(), "\t")

	tout(json + "\n")
	tout("Passed!\n")


# Main Entry point.
func _ready():
	for Key in Tests :
		test(Tests[Key])
382  Editor/Lectures/Lecture.6.gd  Normal file
@@ -0,0 +1,382 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem
# it necessary, there will be heavy refactors.

const TokenType = \
{
	Program = "Program",

	# Comments
	CommentLine      = "CommentLine",
	CommentMultiLine = "CommentMultiLine",

	# Formatting
	Whitespace = "Whitespace",

	# Statements
	StatementEnd   = "StatementEnd",
	StmtBlockStart = "BlockStatementStart",
	StmtBlockEnd   = "BlockStatementEnd",

	# Literals
	Number = "Number",
	String = "String"
}

const TokenSpec = \
{
	TokenType.CommentLine      : "^\/\/.*",
	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
	TokenType.Whitespace       : "^\\s+",
	TokenType.Number           : "\\d+",
	TokenType.String           : "^\"[^\"]*\"",
	TokenType.StatementEnd     : "^;",
	TokenType.StmtBlockStart   : "^{",
	TokenType.StmtBlockEnd     : "^}"
}

class Token:
	var Type  : String
	var Value : String

	func to_Dictionary():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Tokenizer:
	var SrcTxt : String
	var Cursor : int

	# Sets up the tokenizer with the program source text.
	func init(programSrcText):
		SrcTxt = programSrcText
		Cursor = 0

	# Provides the next token in the source text.
	func next_Token():
		if self.reached_EndOfTxt() == true :
			return null

		var srcLeft = self.SrcTxt.substr(Cursor)
		var regex   = RegEx.new()
		var token   = Token.new()

		for type in TokenSpec :
			regex.compile(TokenSpec[type])

			var result = regex.search(srcLeft)
			if result == null || result.get_start() != 0 :
				continue

			# Skip Comments
			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
				self.Cursor += result.get_string().length()
				return next_Token()

			# Skip Whitespace
			if type == TokenType.Whitespace :
				var addVal   = result.get_string().length()
				self.Cursor += addVal

				return next_Token()

			token.Type   = type
			token.Value  = result.get_string()
			self.Cursor += ( result.get_string().length() )

			return token

		var assertStrTmplt = "next_token: Source text not understood by tokenizer at Cursor pos: {value}"
		var assertStr      = assertStrTmplt.format({"value" : self.Cursor})
		assert(false, assertStr)
		return null

	func reached_EndOfTxt():
		return self.Cursor >= ( self.SrcTxt.length() )

var GTokenizer = Tokenizer.new()



const AST_Format = \
{
	Dictionary  = "Dictionary",
	SExpression = "S-Expression"
}

const SyntaxNodeType = \
{
	NumericLiteral      = "NumericLiteral",
	StringLiteral       = "StringLiteral",
	ExpressionStatement = "ExpressionStatement",
	BlockStatement      = "BlockStatement",
	EmptyStatement      = "EmptyStatement"
}

class SyntaxNode:
	var Type : String
	var Value # Not specifying a type implicitly declares a Variant type.

	func to_SExpression():
		var expression = [ Type ]

		if typeof(Value) == TYPE_ARRAY :
			var array = []
			for entry in self.Value :
				array.append( entry.to_SExpression() )

			expression.append(array)
			return expression

		if typeof(Value) == TYPE_OBJECT :
			var result = [ Type, Value.to_SExpression() ]
			return result

		expression.append(Value)
		return expression
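	# For example, parsing "42;" with the parser below would serialize to
	# roughly (a sketch, given the statement wrapper node is returned):
	#   [ "Program", [ [ "ExpressionStatement", [ "NumericLiteral", 42 ] ] ] ]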
	func to_Dictionary():
		if typeof(Value) == TYPE_ARRAY :
			var array = []
			for entry in self.Value :
				array.append(entry.to_Dictionary())

			var result = \
			{
				Type  = self.Type,
				Value = array
			}
			return result

		if typeof(Value) == TYPE_OBJECT :
			var result = \
			{
				Type  = self.Type,
				Value = self.Value.to_Dictionary()
			}
			return result

		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Parser:
	var TokenizerRef : Tokenizer
	var NextToken    : Token

	func eat(tokenType):
		var currToken = self.NextToken

		assert(currToken != null, "eat: NextToken was null")

		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
		var assertStr      = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})

		assert(currToken.Type == tokenType, assertStr)

		self.NextToken = self.TokenizerRef.next_Token()

		return currToken

	# Literal
	#	: NumericLiteral
	#	: StringLiteral
	#	;
	#
	func parse_Literal():
		match NextToken.Type :
			TokenType.Number:
				return parse_NumericLiteral()
			TokenType.String:
				return parse_StringLiteral()

		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")

	# NumericLiteral
	#	: Number
	#	;
	#
	func parse_NumericLiteral():
		var Token = eat(TokenType.Number)
		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.NumericLiteral
		node.Value = int( Token.Value )

		return node

	# StringLiteral
	#	: String
	#	;
	#
	func parse_StringLiteral():
		var Token = eat(TokenType.String)
		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.StringLiteral
		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )

		return node

	# Expression
	#	: Literal
	#	;
	#
	func parse_Expression():
		return parse_Literal()

	# EmptyStatement
	#	;
	#
	func parse_EmptyStatement():
		eat(TokenType.StatementEnd)

		var \
		node      = SyntaxNode.new()
		node.Type = SyntaxNodeType.EmptyStatement

		return node

	# BlockStatement
	#	: { OptStatementList }
	#	;
	#
	func parse_BlockStatement():
		eat(TokenType.StmtBlockStart)

		var \
		node      = SyntaxNode.new()
		node.Type = SyntaxNodeType.BlockStatement

		if NextToken.Type != TokenType.StmtBlockEnd :
			node.Value = parse_StatementList(TokenType.StmtBlockEnd)
		else :
			node.Value = []

		eat(TokenType.StmtBlockEnd)

		return node

	# ExpressionStatement
	#	: Expression
	#	;
	#
	func parse_ExpressionStatement():
		var expression = parse_Expression()
		eat(TokenType.StatementEnd)

		var \
		node       = SyntaxNode.new()
		node.Type  = SyntaxNodeType.ExpressionStatement
		node.Value = expression

		return node

	# Statement
	#	: ExpressionStatement
	#	: BlockStatement
	#	: EmptyStatement
	#	;
	#
	func parse_Statement():
		match NextToken.Type :
			TokenType.StatementEnd :
				return parse_EmptyStatement()
			TokenType.StmtBlockStart :
				return parse_BlockStatement()

		return parse_ExpressionStatement()

	# StatementList
	#	: Statement
	#	| StatementList Statement -> Statement ...
	#	;
	#
	func parse_StatementList(endToken):
		var statementList = [ parse_Statement() ]

		while NextToken != null && NextToken.Type != endToken :
			statementList.append( parse_Statement() )

		return statementList

	# Program
	#	: StatementList
	#	: Literal
	#	;
	#
	func parse_Program():
		var \
		node       = SyntaxNode.new()
		node.Type  = TokenType.Program
		node.Value = parse_StatementList(null)

		return node

	# Parses the text program description into an AST.
	func parse(TokenizerRef):
		self.TokenizerRef = TokenizerRef

		NextToken = TokenizerRef.next_Token()

		return parse_Program()

var GParser = Parser.new()



onready var TextOut = GScene.get_node("TextOutput")

func tout(text):
	TextOut.insert_text_at_cursor(text)

const Tests = \
{
	MultiStatement = \
	{
		Name = "Multi-Statement",
		File = "1.Multi-Statement.uf"
	},
	BlockStatement = \
	{
		Name = "Block Statement",
		File = "2.BlockStatement.uf"
	}
}

func test(entry):
	var introMessage          = "Testing: {Name}\n"
	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
	tout(introMessageFormatted)

	var path          = "res://Tests/{TestName}"
	var pathFormatted = path.format({"TestName" : entry.File})

	var \
	file = File.new()
	file.open(pathFormatted, File.READ)

	var programDescription = file.get_as_text()
	file.close()

	GTokenizer.init(programDescription)
	var ast = GParser.parse(GTokenizer)

	var json = JSON.print(ast.to_SExpression(), '\t')

	tout(json + "\n")
	tout("Passed!\n")


# Main Entry point.
func _ready():
	for Key in Tests :
		test(Tests[Key])
27  Editor/Lectures/Lecture.tscn  Normal file
@@ -0,0 +1,27 @@
[gd_scene load_steps=3 format=2]

[ext_resource path="res://Assets/Styles/EditorTheme.tres" type="Theme" id=1]
[ext_resource path="res://Assets/Branding/RDP_Class_cover_small.png" type="Texture" id=2]

[node name="Control" type="Control"]
anchor_right = 1.0
anchor_bottom = 1.0

[node name="CourseBrand" type="TextureRect" parent="."]
anchor_right = 1.0
anchor_bottom = 1.0
rect_scale = Vector2( 0.25, 0.25 )
texture = ExtResource( 2 )
expand = true
stretch_mode = 6

[node name="TextOutput" type="TextEdit" parent="."]
anchor_left = 0.25
anchor_right = 1.0
anchor_bottom = 1.0
grow_horizontal = 0
theme = ExtResource( 1 )
readonly = true
highlight_current_line = true
show_line_numbers = true
minimap_draw = true
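The TextOutput TextEdit in this scene is the results UI the commit message refers to: the Lecture 5 and 6 scripts write their JSON output into it through tout() / insert_text_at_cursor() instead of printing to the console.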