Made a UI for results, BAPFS - Lecture 4, 5, and 6 done.

Edward R. Gonzalez 2022-07-09 02:57:00 -04:00
parent 6cc89cb69a
commit f3e12c91b4
34 changed files with 1222 additions and 179 deletions

View File

Binary image file (495 KiB before and after; preview not shown)

View File

@@ -2,15 +2,15 @@
 importer="texture"
 type="StreamTexture"
-path="res://.import/RDP_Class_cover_small.png-51d9e4e36c8441da2486970409e2a06b.stex"
+path="res://.import/RDP_Class_cover_small.png-ab70c489e9b3c0feb8bbeb581c2176f1.stex"
 metadata={
 "vram_texture": false
 }
 [deps]
-source_file="res://Branding/RDP_Class_cover_small.png"
-dest_files=[ "res://.import/RDP_Class_cover_small.png-51d9e4e36c8441da2486970409e2a06b.stex" ]
+source_file="res://Assets/Branding/RDP_Class_cover_small.png"
+dest_files=[ "res://.import/RDP_Class_cover_small.png-ab70c489e9b3c0feb8bbeb581c2176f1.stex" ]
 [params]

View File

@@ -0,0 +1,9 @@
[gd_resource type="DynamicFont" load_steps=2 format=2]
[sub_resource type="DynamicFontData" id=1]
font_path = "res://Assets/Fonts/RecMonoSemicasual-Regular-1.084.ttf"
[resource]
size = 14
use_mipmaps = true
font_data = SubResource( 1 )

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,4 @@
[gd_resource type="StyleBoxFlat" format=2]
[resource]
bg_color = Color( 0.0941176, 0.0666667, 0.137255, 1 )

View File

@@ -0,0 +1,11 @@
[gd_resource type="Theme" load_steps=3 format=2]
[ext_resource path="res://Assets/Styles/Editor.SytleBoxFlat.tres" type="StyleBox" id=1]
[ext_resource path="res://Assets/Fonts/DF_RecMonoSemiCasul.tres" type="DynamicFont" id=2]
[resource]
TextEdit/colors/font_color = Color( 1, 1, 1, 1 )
TextEdit/colors/font_color_readonly = Color( 1, 1, 1, 1 )
TextEdit/fonts/font = ExtResource( 2 )
TextEdit/styles/normal = ExtResource( 1 )
TextEdit/styles/read_only = ExtResource( 1 )

View File

@@ -1,6 +0,0 @@
[gd_scene load_steps=2 format=2]
[ext_resource path="res://Lecture.1.gd" type="Script" id=1]
[node name="Test" type="Node2D"]
script = ExtResource( 1 )

View File

@@ -1,6 +0,0 @@
[gd_scene load_steps=2 format=2]
[ext_resource path="res://Lecture.2.gd" type="Script" id=1]
[node name="Test" type="Node2D"]
script = ExtResource( 1 )

View File

@@ -1,8 +0,0 @@
[gd_scene load_steps=2 format=2]
[ext_resource path="res://Lecture.3..gd" type="Script" id=1]
[node name="Control" type="Control"]
anchor_right = 1.0
anchor_bottom = 1.0
script = ExtResource( 1 )

View File

@@ -1,7 +1,5 @@
 extends Node
-const JsonBeautifier = preload("res://ThirdParty/json_beautifier.gd")
 # This closely follows the source provided in the lectures.
 # Later on, after the lectures are complete or when I deem it
 # necessary, there will be heavy refactors.
@@ -194,14 +192,14 @@ func _ready():
 	GTokenizer.init(ProgramDescription)
 	var ast = GParser.parse(GTokenizer)
-	print(JsonBeautifier.beautify_json(to_json(ast.toDict())))
+	print(JSON.print(ast.toDict(), "\t"))
 	# String Test
 	ProgramDescription = "\"hello\""
 	GTokenizer.init(ProgramDescription)
 	ast = GParser.parse(GTokenizer)
-	print(JsonBeautifier.beautify_json(to_json(ast.toDict())))
+	print(JSON.print(ast.toDict(), "\t"))
 	# Called every frame. 'delta' is the elapsed time since the previous frame.

View File

@@ -1,7 +1,5 @@
 extends Node
-const JsonBeautifier = preload("res://ThirdParty/json_beautifier.gd")
 # This closely follows the source provided in the lectures.
 # Later on, after the lectures are complete or when I deem it
 # necessary, there will be heavy refactors.
@@ -208,7 +206,7 @@ func test():
 	var ast = GParser.parse(GTokenizer)
-	print(JsonBeautifier.beautify_json(to_json(ast.toDict())))
+	print(JSON.print(ast.toDict(), "\t"))
 	# Main Entry point.

View File

@@ -0,0 +1,307 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem it
# necessary, there will be heavy refactors.

const TokenType = \
{
	Program = "Program",

	# Comments
	CommentLine      = "CommentLine",
	CommentMultiLine = "CommentMultiLine",

	# Formatting
	Whitespace = "Whitespace",

	# Statements
	StatementEnd = "StatementEnd",

	# Literals
	Number = "Number",
	String = "String"
}

const TokenSpec = \
{
	TokenType.CommentLine      : "^\/\/.*",
	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
	TokenType.Whitespace       : "^\\s+",
	TokenType.Number           : "\\d+",
	TokenType.String           : "^\"[^\"]*\"",
	TokenType.StatementEnd     : "^;"
}
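
# Note: each spec pattern is anchored with '^' so it can only match at the
# current cursor position; Number lacks the anchor, but next_Token() below
# rejects any match whose start offset is not 0, which has the same effect.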

class Token:
	var Type  : String
	var Value : String

	func toDict():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Tokenizer:
	var SrcTxt : String
	var Cursor : int

	# Sets up the tokenizer with the program source text.
	func init(programSrcText):
		SrcTxt = programSrcText
		Cursor = 0

	# Provides the next token in the source text.
	func next_Token():
		if self.reached_EndOfTxt() == true :
			return null

		var srcLeft = self.SrcTxt.substr(Cursor)
		var regex   = RegEx.new()
		var token   = Token.new()

		for type in TokenSpec :
			regex.compile(TokenSpec[type])

			var result = regex.search(srcLeft)
			if result == null || result.get_start() != 0 :
				continue

			# Skip Comments
			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
				self.Cursor += result.get_string().length()
				return next_Token()

			# Skip Whitespace
			if type == TokenType.Whitespace :
				var addVal = result.get_string().length()
				self.Cursor += addVal
				return next_Token()

			token.Type  = type
			token.Value = result.get_string()
			self.Cursor += result.get_string().length()
			return token

		var assertStrTmplt = "next_Token: Source text not understood by tokenizer at Cursor pos: {value}"
		var assertStr      = assertStrTmplt.format({"value" : self.Cursor})
		assert(false, assertStr)
		return null

	func reached_EndOfTxt():
		return self.Cursor >= ( self.SrcTxt.length() - 1 )
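
# Note: the "- 1" above drops the final character of the source, so a token
# that ends the text (e.g. a trailing ";") is never emitted; Lectures 5 and 6
# compare against length() without the "- 1".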

var GTokenizer = Tokenizer.new()

const SyntaxNodeType = \
{
	NumericLiteral      = "NumericLiteral",
	StringLiteral       = "StringLiteral",
	ExpressionStatement = "ExpressionStatement"
}

class SyntaxNode:
	var Type  : String
	var Value # Not specifying a type implicitly declares a Variant type.

	func toDict():
		var ValueDict = self.Value
		if typeof(Value) == TYPE_ARRAY :
			var dict  = {}
			var index = 0
			for entry in self.Value :
				dict[index] = entry.toDict()
				index += 1
			ValueDict = dict

		var result = \
		{
			Type  = self.Type,
			Value = ValueDict
		}
		return result

class ProgramNode:
	var Type : String
	var Body : Object

	func toDict():
		var result = \
		{
			Type = self.Type,
			Body = self.Body.toDict()
		}
		return result

class Parser:
	var TokenizerRef : Tokenizer
	var NextToken    : Token

	func eat(tokenType):
		var currToken = self.NextToken
		assert(currToken != null, "eat: NextToken was null")

		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
		var assertStr      = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})
		assert(currToken.Type == tokenType, assertStr)

		self.NextToken = self.TokenizerRef.next_Token()
		return currToken

	# Literal
	# : NumericLiteral
	# : StringLiteral
	# ;
	#
	func parse_Literal():
		match NextToken.Type :
			TokenType.Number:
				return parse_NumericLiteral()
			TokenType.String:
				return parse_StringLiteral()
		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")

	# NumericLiteral
	# : Number
	# ;
	#
	func parse_NumericLiteral():
		var Token = eat(TokenType.Number)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.NumericLiteral
		node.Value = int( Token.Value )
		return node

	# StringLiteral
	# : String
	# ;
	#
	func parse_StringLiteral():
		var Token = eat(TokenType.String)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.StringLiteral
		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )
		return node

	# Expression
	# : Literal
	# ;
	#
	func parse_Expression():
		return parse_Literal()

	# ExpressionStatement
	# : Expression
	# ;
	#
	func parse_ExpressionStatement():
		var expression = parse_Expression()
		eat(TokenType.StatementEnd)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.ExpressionStatement
		node.Value = expression
		return node

	# Statement
	# : ExpressionStatement
	# ;
	#
	func parse_Statement():
		return parse_ExpressionStatement()

	# StatementList
	# : Statement
	# | StatementList Statement -> Statement ...
	# ;
	#
	func parse_StatementList():
		var statementList = [ parse_Statement() ]
		while NextToken != null :
			statementList.append( parse_Statement() )

		var \
		node = SyntaxNode.new()
		node.Type  = "StatementList"
		node.Value = statementList
		return node

	# Program
	# : StatementList
	# ;
	#
	func parse_Program():
		var \
		node = ProgramNode.new()
		node.Type = TokenType.Program
		node.Body = parse_StatementList()
		return node

	# Parses the text program description into an AST.
	func parse(TokenizerRef):
		self.TokenizerRef = TokenizerRef

		NextToken = TokenizerRef.next_Token()
		return parse_Program()

var GParser = Parser.new()

const Tests = \
{
	MultiStatement = \
	{
		Name = "Multi-Statement",
		File = "1.Multi-Statement.uf"
	}
}

func test(entry):
	var introMessage          = "Testing: {Name}"
	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
	print(introMessageFormatted)

	var path          = "res://Tests/{TestName}"
	var pathFormatted = path.format({"TestName" : entry.File})

	var \
	file = File.new()
	file.open(pathFormatted, File.READ)

	var programDescription = file.get_as_text()
	file.close()

	GTokenizer.init(programDescription)
	var ast = GParser.parse(GTokenizer)

	var json = JSON.print(ast.toDict(), "\t")
	print(json)
	print("Passed!\n")

# Main Entry point.
func _ready():
	for Key in Tests :
		test(Tests[Key])

View File

@@ -0,0 +1,373 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem it
# necessary, there will be heavy refactors.

const TokenType = \
{
	Program = "Program",

	# Comments
	CommentLine      = "CommentLine",
	CommentMultiLine = "CommentMultiLine",

	# Formatting
	Whitespace = "Whitespace",

	# Statements
	StatementEnd   = "StatementEnd",
	StmtBlockStart = "BlockStatementStart",
	StmtBlockEnd   = "BlockStatementEnd",

	# Literals
	Number = "Number",
	String = "String"
}

const TokenSpec = \
{
	TokenType.CommentLine      : "^\/\/.*",
	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
	TokenType.Whitespace       : "^\\s+",
	TokenType.Number           : "\\d+",
	TokenType.String           : "^\"[^\"]*\"",
	TokenType.StatementEnd     : "^;",
	TokenType.StmtBlockStart   : "^{",
	TokenType.StmtBlockEnd     : "^}"
}

class Token:
	var Type  : String
	var Value : String

	func toDict():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Tokenizer:
	var SrcTxt : String
	var Cursor : int

	# Sets up the tokenizer with the program source text.
	func init(programSrcText):
		SrcTxt = programSrcText
		Cursor = 0

	# Provides the next token in the source text.
	func next_Token():
		if self.reached_EndOfTxt() == true :
			return null

		var srcLeft = self.SrcTxt.substr(Cursor)
		var regex   = RegEx.new()
		var token   = Token.new()

		for type in TokenSpec :
			regex.compile(TokenSpec[type])

			var result = regex.search(srcLeft)
			if result == null || result.get_start() != 0 :
				continue

			# Skip Comments
			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
				self.Cursor += result.get_string().length()
				return next_Token()

			# Skip Whitespace
			if type == TokenType.Whitespace :
				var addVal = result.get_string().length()
				self.Cursor += addVal
				return next_Token()

			token.Type  = type
			token.Value = result.get_string()
			self.Cursor += result.get_string().length()
			return token

		var assertStrTmplt = "next_Token: Source text not understood by tokenizer at Cursor pos: {value}"
		var assertStr      = assertStrTmplt.format({"value" : self.Cursor})
		assert(false, assertStr)
		return null

	func reached_EndOfTxt():
		return self.Cursor >= ( self.SrcTxt.length() )
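
# Usage sketch (hand-derived from the spec above, not captured from a run):
#   GTokenizer.init("42;")
#   GTokenizer.next_Token()  # Token with Type "Number", Value "42"
#   GTokenizer.next_Token()  # Token with Type "StatementEnd", Value ";"
#   GTokenizer.next_Token()  # null: end of text reached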

var GTokenizer = Tokenizer.new()

const SyntaxNodeType = \
{
	NumericLiteral      = "NumericLiteral",
	StringLiteral       = "StringLiteral",
	ExpressionStatement = "ExpressionStatement",
	BlockStatement      = "BlockStatement",
	EmptyStatement      = "EmptyStatement"
}

class SyntaxNode:
	var Type  : String
	var Value # Not specifying a type implicitly declares a Variant type.

	func toDict():
		var ValueDict = self.Value
		if typeof(Value) == TYPE_ARRAY :
			var dict  = {}
			var index = 0
			for entry in self.Value :
				dict[index] = entry.toDict()
				index += 1
			ValueDict = dict

		if typeof(Value) == TYPE_OBJECT :
			var result = \
			{
				Type  = self.Type,
				Value = self.Value.toDict()
			}
			return result

		var result = \
		{
			Type  = self.Type,
			Value = ValueDict
		}
		return result

class ProgramNode:
	var Type : String
	var Body : Object

	func toDict():
		var result = \
		{
			Type = self.Type,
			Body = self.Body.toDict()
		}
		return result

class Parser:
	var TokenizerRef : Tokenizer
	var NextToken    : Token

	func eat(tokenType):
		var currToken = self.NextToken
		assert(currToken != null, "eat: NextToken was null")

		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
		var assertStr      = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})
		assert(currToken.Type == tokenType, assertStr)

		self.NextToken = self.TokenizerRef.next_Token()
		return currToken

	# Literal
	# : NumericLiteral
	# : StringLiteral
	# ;
	#
	func parse_Literal():
		match NextToken.Type :
			TokenType.Number:
				return parse_NumericLiteral()
			TokenType.String:
				return parse_StringLiteral()
		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")

	# NumericLiteral
	# : Number
	# ;
	#
	func parse_NumericLiteral():
		var Token = eat(TokenType.Number)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.NumericLiteral
		node.Value = int( Token.Value )
		return node

	# StringLiteral
	# : String
	# ;
	#
	func parse_StringLiteral():
		var Token = eat(TokenType.String)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.StringLiteral
		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )
		return node

	# Expression
	# : Literal
	# ;
	#
	func parse_Expression():
		return parse_Literal()

	# EmptyStatement
	# ;
	#
	func parse_EmptyStatement():
		eat(TokenType.StatementEnd)

		var \
		node = SyntaxNode.new()
		node.Type = SyntaxNodeType.EmptyStatement
		return node

	# BlockStatement
	# : { OptStatementList }
	# ;
	#
	func parse_BlockStatement():
		eat(TokenType.StmtBlockStart)

		var \
		node = SyntaxNode.new()
		node.Type = SyntaxNodeType.BlockStatement

		if NextToken.Type != TokenType.StmtBlockEnd :
			node.Value = parse_StatementList(TokenType.StmtBlockEnd)
		else :
			node.Value = []

		eat(TokenType.StmtBlockEnd)
		return node

	# ExpressionStatement
	# : Expression
	# ;
	#
	func parse_ExpressionStatement():
		var expression = parse_Expression()
		eat(TokenType.StatementEnd)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.ExpressionStatement
		node.Value = expression
		return node

	# Statement
	# : ExpressionStatement
	# : BlockStatement
	# : EmptyStatement
	# ;
	#
	func parse_Statement():
		match NextToken.Type :
			TokenType.StatementEnd :
				return parse_EmptyStatement()
			TokenType.StmtBlockStart :
				return parse_BlockStatement()
		return parse_ExpressionStatement()

	# StatementList
	# : Statement
	# | StatementList Statement -> Statement ...
	# ;
	#
	func parse_StatementList(endToken):
		var statementList = [ parse_Statement() ]
		while NextToken != null && NextToken.Type != endToken :
			statementList.append( parse_Statement() )

		var \
		node = SyntaxNode.new()
		node.Type  = "StatementList"
		node.Value = statementList
		return node

	# Program
	# : StatementList
	# ;
	#
	func parse_Program():
		var \
		node = ProgramNode.new()
		node.Type = TokenType.Program
		node.Body = parse_StatementList(null)
		return node

	# Parses the text program description into an AST.
	func parse(TokenizerRef):
		self.TokenizerRef = TokenizerRef

		NextToken = TokenizerRef.next_Token()
		return parse_Program()

var GParser = Parser.new()

onready var TextOut = GScene.get_node("TextOutput")

func tout(text):
	TextOut.insert_text_at_cursor(text)
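
# GScene is an autoload of the Lecture scene (see project.godot), and
# "TextOutput" is the TextEdit node it contains, so tout() appends parser
# results to the on-screen log.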

const Tests = \
{
	MultiStatement = \
	{
		Name = "Multi-Statement",
		File = "1.Multi-Statement.uf"
	},
	BlockStatement = \
	{
		Name = "Block Statement",
		File = "2.BlockStatement.uf"
	}
}

func test(entry):
	var introMessage          = "Testing: {Name}\n"
	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
	tout(introMessageFormatted)

	var path          = "res://Tests/{TestName}"
	var pathFormatted = path.format({"TestName" : entry.File})

	var \
	file = File.new()
	file.open(pathFormatted, File.READ)

	var programDescription = file.get_as_text()
	file.close()

	GTokenizer.init(programDescription)
	var ast = GParser.parse(GTokenizer)

	var json = JSON.print(ast.toDict(), "\t")
	tout(json + "\n")
	tout("Passed!\n")

# Main Entry point.
func _ready():
	for Key in Tests :
		test(Tests[Key])

View File

@@ -0,0 +1,382 @@
extends Node

# This closely follows the source provided in the lectures.
# Later on, after the lectures are complete or when I deem it
# necessary, there will be heavy refactors.

const TokenType = \
{
	Program = "Program",

	# Comments
	CommentLine      = "CommentLine",
	CommentMultiLine = "CommentMultiLine",

	# Formatting
	Whitespace = "Whitespace",

	# Statements
	StatementEnd   = "StatementEnd",
	StmtBlockStart = "BlockStatementStart",
	StmtBlockEnd   = "BlockStatementEnd",

	# Literals
	Number = "Number",
	String = "String"
}

const TokenSpec = \
{
	TokenType.CommentLine      : "^\/\/.*",
	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
	TokenType.Whitespace       : "^\\s+",
	TokenType.Number           : "\\d+",
	TokenType.String           : "^\"[^\"]*\"",
	TokenType.StatementEnd     : "^;",
	TokenType.StmtBlockStart   : "^{",
	TokenType.StmtBlockEnd     : "^}"
}

class Token:
	var Type  : String
	var Value : String

	func to_Dictionary():
		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Tokenizer:
	var SrcTxt : String
	var Cursor : int

	# Sets up the tokenizer with the program source text.
	func init(programSrcText):
		SrcTxt = programSrcText
		Cursor = 0

	# Provides the next token in the source text.
	func next_Token():
		if self.reached_EndOfTxt() == true :
			return null

		var srcLeft = self.SrcTxt.substr(Cursor)
		var regex   = RegEx.new()
		var token   = Token.new()

		for type in TokenSpec :
			regex.compile(TokenSpec[type])

			var result = regex.search(srcLeft)
			if result == null || result.get_start() != 0 :
				continue

			# Skip Comments
			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
				self.Cursor += result.get_string().length()
				return next_Token()

			# Skip Whitespace
			if type == TokenType.Whitespace :
				var addVal = result.get_string().length()
				self.Cursor += addVal
				return next_Token()

			token.Type  = type
			token.Value = result.get_string()
			self.Cursor += result.get_string().length()
			return token

		var assertStrTmplt = "next_Token: Source text not understood by tokenizer at Cursor pos: {value}"
		var assertStr      = assertStrTmplt.format({"value" : self.Cursor})
		assert(false, assertStr)
		return null

	func reached_EndOfTxt():
		return self.Cursor >= ( self.SrcTxt.length() )

var GTokenizer = Tokenizer.new()

const AST_Format = \
{
	Dictionary  = "Dictionary",
	SExpression = "S-Expression"
}

const SyntaxNodeType = \
{
	NumericLiteral      = "NumericLiteral",
	StringLiteral       = "StringLiteral",
	ExpressionStatement = "ExpressionStatement",
	BlockStatement      = "BlockStatement",
	EmptyStatement      = "EmptyStatement"
}

class SyntaxNode:
	var Type  : String
	var Value # Not specifying a type implicitly declares a Variant type.

	func to_SExpression():
		var expression = [ Type ]

		if typeof(Value) == TYPE_ARRAY :
			var array = []
			for entry in self.Value :
				array.append( entry.to_SExpression() )
			expression.append(array)
			return expression

		if typeof(Value) == TYPE_OBJECT :
			var result = [ Type, Value.to_SExpression() ]
			return result

		expression.append(Value)
		return expression
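
	# Hand-derived example (not captured from a run): parsing "42;" gives
	#   [ "Program", [ [ "ExpressionStatement", [ "NumericLiteral", 42 ] ] ] ]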

	func to_Dictionary():
		if typeof(Value) == TYPE_ARRAY :
			var array = []
			for entry in self.Value :
				array.append(entry.to_Dictionary())

			var result = \
			{
				Type  = self.Type,
				Value = array
			}
			return result

		if typeof(Value) == TYPE_OBJECT :
			var result = \
			{
				Type  = self.Type,
				Value = self.Value.to_Dictionary()
			}
			return result

		var result = \
		{
			Type  = self.Type,
			Value = self.Value
		}
		return result

class Parser:
	var TokenizerRef : Tokenizer
	var NextToken    : Token

	func eat(tokenType):
		var currToken = self.NextToken
		assert(currToken != null, "eat: NextToken was null")

		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
		var assertStr      = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})
		assert(currToken.Type == tokenType, assertStr)

		self.NextToken = self.TokenizerRef.next_Token()
		return currToken

	# Literal
	# : NumericLiteral
	# : StringLiteral
	# ;
	#
	func parse_Literal():
		match NextToken.Type :
			TokenType.Number:
				return parse_NumericLiteral()
			TokenType.String:
				return parse_StringLiteral()
		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")

	# NumericLiteral
	# : Number
	# ;
	#
	func parse_NumericLiteral():
		var Token = eat(TokenType.Number)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.NumericLiteral
		node.Value = int( Token.Value )
		return node

	# StringLiteral
	# : String
	# ;
	#
	func parse_StringLiteral():
		var Token = eat(TokenType.String)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.StringLiteral
		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )
		return node

	# Expression
	# : Literal
	# ;
	#
	func parse_Expression():
		return parse_Literal()

	# EmptyStatement
	# ;
	#
	func parse_EmptyStatement():
		eat(TokenType.StatementEnd)

		var \
		node = SyntaxNode.new()
		node.Type = SyntaxNodeType.EmptyStatement
		return node

	# BlockStatement
	# : { OptStatementList }
	# ;
	#
	func parse_BlockStatement():
		eat(TokenType.StmtBlockStart)

		var \
		node = SyntaxNode.new()
		node.Type = SyntaxNodeType.BlockStatement

		if NextToken.Type != TokenType.StmtBlockEnd :
			node.Value = parse_StatementList(TokenType.StmtBlockEnd)
		else :
			node.Value = []

		eat(TokenType.StmtBlockEnd)
		return node

	# ExpressionStatement
	# : Expression
	# ;
	#
	func parse_ExpressionStatement():
		var expression = parse_Expression()
		eat(TokenType.StatementEnd)

		var \
		node = SyntaxNode.new()
		node.Type  = SyntaxNodeType.ExpressionStatement
		node.Value = expression
		return node

	# Statement
	# : ExpressionStatement
	# : BlockStatement
	# : EmptyStatement
	# ;
	#
	func parse_Statement():
		match NextToken.Type :
			TokenType.StatementEnd :
				return parse_EmptyStatement()
			TokenType.StmtBlockStart :
				return parse_BlockStatement()
		return parse_ExpressionStatement()

	# StatementList
	# : Statement
	# | StatementList Statement -> Statement ...
	# ;
	#
	func parse_StatementList(endToken):
		var statementList = [ parse_Statement() ]
		while NextToken != null && NextToken.Type != endToken :
			statementList.append( parse_Statement() )
		return statementList
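
	# Note: unlike Lecture 5, this returns the raw statement array rather than
	# wrapping it in a "StatementList" node; to_SExpression() and
	# to_Dictionary() handle TYPE_ARRAY values directly.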

	# Program
	# : StatementList
	# ;
	#
	func parse_Program():
		var \
		node = SyntaxNode.new()
		node.Type  = TokenType.Program
		node.Value = parse_StatementList(null)
		return node

	# Parses the text program description into an AST.
	func parse(TokenizerRef):
		self.TokenizerRef = TokenizerRef

		NextToken = TokenizerRef.next_Token()
		return parse_Program()

var GParser = Parser.new()

onready var TextOut = GScene.get_node("TextOutput")

func tout(text):
	TextOut.insert_text_at_cursor(text)

const Tests = \
{
	MultiStatement = \
	{
		Name = "Multi-Statement",
		File = "1.Multi-Statement.uf"
	},
	BlockStatement = \
	{
		Name = "Block Statement",
		File = "2.BlockStatement.uf"
	}
}

func test(entry):
	var introMessage          = "Testing: {Name}\n"
	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
	tout(introMessageFormatted)

	var path          = "res://Tests/{TestName}"
	var pathFormatted = path.format({"TestName" : entry.File})

	var \
	file = File.new()
	file.open(pathFormatted, File.READ)

	var programDescription = file.get_as_text()
	file.close()

	GTokenizer.init(programDescription)
	var ast = GParser.parse(GTokenizer)

	var json = JSON.print(ast.to_SExpression(), "\t")
	tout(json + "\n")
	tout("Passed!\n")

# Main Entry point.
func _ready():
	for Key in Tests :
		test(Tests[Key])

View File

@@ -0,0 +1,27 @@
[gd_scene load_steps=3 format=2]
[ext_resource path="res://Assets/Styles/EditorTheme.tres" type="Theme" id=1]
[ext_resource path="res://Assets/Branding/RDP_Class_cover_small.png" type="Texture" id=2]
[node name="Control" type="Control"]
anchor_right = 1.0
anchor_bottom = 1.0
[node name="CourseBrand" type="TextureRect" parent="."]
anchor_right = 1.0
anchor_bottom = 1.0
rect_scale = Vector2( 0.25, 0.25 )
texture = ExtResource( 2 )
expand = true
stretch_mode = 6
[node name="TextOutput" type="TextEdit" parent="."]
anchor_left = 0.25
anchor_right = 1.0
anchor_bottom = 1.0
grow_horizontal = 0
theme = ExtResource( 1 )
readonly = true
highlight_current_line = true
show_line_numbers = true
minimap_draw = true

Editor/Persistent.tscn (new file)
View File

@@ -0,0 +1,3 @@
[gd_scene format=2]
[node name="P-Root" type="Node"]

View File

@@ -0,0 +1,8 @@
// Testing a comment
"Hello World!";
/**
*
* Testing a comment
*/
42;

View File

@@ -0,0 +1,18 @@
{
	42;
	"Hello World!";
}

{
}

{
	42;
	{
		"Hello World!";
	}

	{
		;
	}
}

View File

@@ -0,0 +1,7 @@
// Binary expression
2 + 2;
// Nested binary expressions:
// left : 3 + 2
// right : 2
3 + 2 - 2;
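// i.e. parsed left-associatively: ((3 + 2) - 2)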

View File

@@ -0,0 +1,5 @@
// Single
x = 42;
// Chained
x = (y = 42);

View File

@@ -0,0 +1,8 @@
if (x)
{
	x = 1;
}
else
{
	x = 2;
}

View File

@@ -0,0 +1 @@
x > 0;

View File

@@ -0,0 +1,8 @@
// Declaration
let x;
// Single assignment declaration
let y = 42;
// Multiple declarations
let a, b;

View File

@@ -1,133 +0,0 @@
###############################################################################
# JSON Beautifier                                                             #
# Copyright (C) 2018-2020 Michael Alexsander                                  #
#-----------------------------------------------------------------------------#
# This Source Code Form is subject to the terms of the Mozilla Public         #
# License, v. 2.0. If a copy of the MPL was not distributed with this         #
# file, You can obtain one at http://mozilla.org/MPL/2.0/.                    #
###############################################################################

class_name JSONBeautifier

# Takes valid JSON (if invalid, it will return an error according to Godot's
# 'validate_json()' method) and a number of spaces for indentation (default is
# '0', in which case it will use tabs instead), returning properly formatted
# JSON.
static func beautify_json(json: String, spaces := 0) -> String:
	var error_message: String = validate_json(json)
	if not error_message.empty():
		return error_message

	var indentation := ""
	if spaces > 0:
		for i in spaces:
			indentation += " "
	else:
		indentation = "\t"

	var quotation_start := -1
	var char_position := 0
	for i in json:
		# Work around a Godot quirk, as it allows JSON strings to end with a
		# trailing comma.
		if i == "," and char_position + 1 == json.length():
			break

		# Avoid formatting inside strings.
		if i == "\"":
			if quotation_start == -1:
				quotation_start = char_position
			elif json[char_position - 1] != "\\":
				quotation_start = -1
			char_position += 1
			continue
		elif quotation_start != -1:
			char_position += 1
			continue

		match i:
			# Remove pre-existing formatting.
			" ", "\n", "\t":
				json[char_position] = ""
				char_position -= 1
			"{", "[", ",":
				if json[char_position + 1] != "}" and\
						json[char_position + 1] != "]":
					json = json.insert(char_position + 1, "\n")
					char_position += 1
			"}", "]":
				if json[char_position - 1] != "{" and\
						json[char_position - 1] != "[":
					json = json.insert(char_position, "\n")
					char_position += 1
			":":
				json = json.insert(char_position + 1, " ")
				char_position += 1

		char_position += 1

	for i in [["{", "}"], ["[", "]"]]:
		var bracket_start: int = json.find(i[0])
		while bracket_start != -1:
			var bracket_end: int = json.find("\n", bracket_start)
			var bracket_count := 0
			while bracket_end != -1:
				if json[bracket_end - 1] == i[0]:
					bracket_count += 1
				elif json[bracket_end + 1] == i[1]:
					bracket_count -= 1

				# Move through the indentation to see if there is a match.
				while json[bracket_end + 1] == indentation[0]:
					bracket_end += 1
					if json[bracket_end + 1] == i[1]:
						bracket_count -= 1

				if bracket_count <= 0:
					break

				bracket_end = json.find("\n", bracket_end + 1)

			# Skip one newline so the end bracket doesn't get indented.
			bracket_end = json.rfind("\n", json.rfind("\n", bracket_end) - 1)
			while bracket_end > bracket_start:
				json = json.insert(bracket_end + 1, indentation)
				bracket_end = json.rfind("\n", bracket_end - 1)

			bracket_start = json.find(i[0], bracket_start + 1)

	return json

# Takes valid JSON (if invalid, it will return an error according to Godot's
# 'validate_json()' method), returning JSON in a single line.
static func uglify_json(json: String) -> String:
	var quotation_start := -1
	var char_position := 0
	for i in json:
		# Avoid formatting inside strings.
		if i == "\"":
			if quotation_start == -1:
				quotation_start = char_position
			elif json[char_position - 1] != "\\":
				quotation_start = -1
			char_position += 1
			continue
		elif quotation_start != -1:
			char_position += 1
			continue

		if i == " " or i == "\n" or i == "\t":
			json[char_position] = ""
			char_position -= 1

		char_position += 1

	return json

View File

@@ -8,22 +8,17 @@
 config_version=4
-_global_script_classes=[ {
-"base": "Reference",
-"class": "JSONBeautifier",
-"language": "GDScript",
-"path": "res://ThirdParty/json_beautifier.gd"
-} ]
-_global_script_class_icons={
-"JSONBeautifier": ""
-}
 [application]
 config/name="Parser"
-run/main_scene="res://Lecture.3.tscn"
-boot_splash/image="res://Branding/RDP_Class_cover_small.png"
-config/icon="res://Branding/RDP_Class_cover_small.png"
+run/main_scene="res://Persistent.tscn"
+boot_splash/image="res://Assets/Branding/RDP_Class_cover_small.png"
+config/icon="res://Assets/Branding/RDP_Class_cover_small.png"
+[autoload]
+GScene="*res://Lectures/Lecture.tscn"
+GScript="*res://Lectures/Lecture.6.gd"
 [gui]

build_engine.bat (new file)
View File

@@ -0,0 +1,5 @@
cd Engine\gd\
scons -j%NUMBER_OF_PROCESSORS% platform=windows
exit

View File

@@ -1,5 +1,3 @@
-cd Engine\gd\bin\
-start godot.windows.tools.64.exe
-exit
+start Engine\gd\bin\godot.windows.tools.64.exe -e Editor/project.godot

logs/godot.log (new file)
View File

@@ -0,0 +1,12 @@
Godot Engine v3.3.4.stable.official.faf3f883d - https://godotengine.org
OpenGL ES 3.0 Renderer: NVIDIA GeForce GTX 1080/PCIe/SSE2
OpenGL ES Batching: ON
ERROR: Condition "err" is true. Returned: err
At: modules/gdscript/gdscript.cpp:815:load_source_code() - Condition "err" is true. Returned: err
ERROR: Cannot load source code from file 'C:/Projects/Study/LangStudies/Editor/Lecture.3.gd'.
At: modules/gdscript/gdscript.cpp:2241:load() - Condition "err != OK" is true. Returned: RES()
ERROR: Failed loading resource: C:/Projects/Study/LangStudies/Editor/Lecture.3.gd. Make sure resources have been imported by opening the project in the editor at least once.
At: core/io/resource_loader.cpp:282:_load() - Condition "found" is true. Returned: RES()
ERROR: Can't load script: C:/Projects/Study/LangStudies/Editor/Lecture.3.gd
At: main/main.cpp:1658:start() - Condition "script_res.is_null()" is true. Returned: false

View File

@@ -0,0 +1,7 @@
Godot Engine v3.3.4.stable.official.faf3f883d - https://godotengine.org
OpenGL ES 3.0 Renderer: NVIDIA GeForce GTX 1080/PCIe/SSE2
OpenGL ES Batching: ON
Project is missing: C:/Projects/Study/Parsing/Editor/project.godot
ERROR: Condition "default_certs != __null" is true.
At: modules/mbedtls/crypto_mbedtls.cpp:201:load_default_certificates() - Condition "default_certs != __null" is true.

View File

@@ -0,0 +1,12 @@
Godot Engine v3.3.4.stable.official.faf3f883d - https://godotengine.org
OpenGL ES 3.0 Renderer: NVIDIA GeForce GTX 1080/PCIe/SSE2
OpenGL ES Batching: ON
ERROR: Condition "err" is true. Returned: err
At: modules/gdscript/gdscript.cpp:815:load_source_code() - Condition "err" is true. Returned: err
ERROR: Cannot load source code from file 'C:/Projects/Study/LangStudies/Editor/Lecture.3.gd'.
At: modules/gdscript/gdscript.cpp:2241:load() - Condition "err != OK" is true. Returned: RES()
ERROR: Failed loading resource: C:/Projects/Study/LangStudies/Editor/Lecture.3.gd. Make sure resources have been imported by opening the project in the editor at least once.
At: core/io/resource_loader.cpp:282:_load() - Condition "found" is true. Returned: RES()
ERROR: Can't load script: C:/Projects/Study/LangStudies/Editor/Lecture.3.gd
At: main/main.cpp:1658:start() - Condition "script_res.is_null()" is true. Returned: false