diff --git a/Editor/Branding/RDP_Class_cover_small.png b/Editor/Assets/Branding/RDP_Class_cover_small.png
similarity index 100%
rename from Editor/Branding/RDP_Class_cover_small.png
rename to Editor/Assets/Branding/RDP_Class_cover_small.png
diff --git a/Editor/Branding/RDP_Class_cover_small.png.import b/Editor/Assets/Branding/RDP_Class_cover_small.png.import
similarity index 67%
rename from Editor/Branding/RDP_Class_cover_small.png.import
rename to Editor/Assets/Branding/RDP_Class_cover_small.png.import
index edd3712..457eec4 100644
--- a/Editor/Branding/RDP_Class_cover_small.png.import
+++ b/Editor/Assets/Branding/RDP_Class_cover_small.png.import
@@ -2,15 +2,15 @@
 
 importer="texture"
 type="StreamTexture"
-path="res://.import/RDP_Class_cover_small.png-51d9e4e36c8441da2486970409e2a06b.stex"
+path="res://.import/RDP_Class_cover_small.png-ab70c489e9b3c0feb8bbeb581c2176f1.stex"
 metadata={
 "vram_texture": false
 }
 
 [deps]
 
-source_file="res://Branding/RDP_Class_cover_small.png"
-dest_files=[ "res://.import/RDP_Class_cover_small.png-51d9e4e36c8441da2486970409e2a06b.stex" ]
+source_file="res://Assets/Branding/RDP_Class_cover_small.png"
+dest_files=[ "res://.import/RDP_Class_cover_small.png-ab70c489e9b3c0feb8bbeb581c2176f1.stex" ]
 
 [params]
 
diff --git a/Editor/Assets/Fonts/DF_RecMonoSemiCasul.tres b/Editor/Assets/Fonts/DF_RecMonoSemiCasul.tres
new file mode 100644
index 0000000..ac33e7a
--- /dev/null
+++ b/Editor/Assets/Fonts/DF_RecMonoSemiCasul.tres
@@ -0,0 +1,9 @@
+[gd_resource type="DynamicFont" load_steps=2 format=2]
+
+[sub_resource type="DynamicFontData" id=1]
+font_path = "res://Assets/Fonts/RecMonoSemicasual-Regular-1.084.ttf"
+
+[resource]
+size = 14
+use_mipmaps = true
+font_data = SubResource( 1 )
diff --git a/Editor/Assets/Fonts/RecMonoSemicasual-Bold-1.084.ttf b/Editor/Assets/Fonts/RecMonoSemicasual-Bold-1.084.ttf
new file mode 100644
index 0000000..192dee2
Binary files /dev/null and b/Editor/Assets/Fonts/RecMonoSemicasual-Bold-1.084.ttf differ
diff --git a/Editor/Assets/Fonts/RecMonoSemicasual-BoldItalic-1.084.ttf b/Editor/Assets/Fonts/RecMonoSemicasual-BoldItalic-1.084.ttf
new file mode 100644
index 0000000..e910856
Binary files /dev/null and b/Editor/Assets/Fonts/RecMonoSemicasual-BoldItalic-1.084.ttf differ
diff --git a/Editor/Assets/Fonts/RecMonoSemicasual-Italic-1.084.ttf b/Editor/Assets/Fonts/RecMonoSemicasual-Italic-1.084.ttf
new file mode 100644
index 0000000..a2acc87
Binary files /dev/null and b/Editor/Assets/Fonts/RecMonoSemicasual-Italic-1.084.ttf differ
diff --git a/Editor/Assets/Fonts/RecMonoSemicasual-Regular-1.084.ttf b/Editor/Assets/Fonts/RecMonoSemicasual-Regular-1.084.ttf
new file mode 100644
index 0000000..2b728a7
Binary files /dev/null and b/Editor/Assets/Fonts/RecMonoSemicasual-Regular-1.084.ttf differ
diff --git a/Editor/Assets/Styles/Editor.SytleBoxFlat.tres b/Editor/Assets/Styles/Editor.SytleBoxFlat.tres
new file mode 100644
index 0000000..4245035
--- /dev/null
+++ b/Editor/Assets/Styles/Editor.SytleBoxFlat.tres
@@ -0,0 +1,4 @@
+[gd_resource type="StyleBoxFlat" format=2]
+
+[resource]
+bg_color = Color( 0.0941176, 0.0666667, 0.137255, 1 )
diff --git a/Editor/Assets/Styles/EditorTheme.tres b/Editor/Assets/Styles/EditorTheme.tres
new file mode 100644
index 0000000..0f4ffba
--- /dev/null
+++ b/Editor/Assets/Styles/EditorTheme.tres
@@ -0,0 +1,11 @@
+[gd_resource type="Theme" load_steps=3 format=2]
+
+[ext_resource path="res://Assets/Styles/Editor.SytleBoxFlat.tres" type="StyleBox" id=1]
+[ext_resource path="res://Assets/Fonts/DF_RecMonoSemiCasul.tres" type="DynamicFont" id=2]
+
+[resource]
+TextEdit/colors/font_color = Color( 1, 1, 1, 1 )
+TextEdit/colors/font_color_readonly = Color( 1, 1, 1, 1 )
+TextEdit/fonts/font = ExtResource( 2 )
+TextEdit/styles/normal = ExtResource( 1 )
+TextEdit/styles/read_only = ExtResource( 1 )
diff --git a/Editor/Lecture.1.tscn b/Editor/Lecture.1.tscn
deleted file mode 100644
index b38ab62..0000000
--- a/Editor/Lecture.1.tscn
+++ /dev/null
@@ -1,6 +0,0 @@
-[gd_scene load_steps=2 format=2]
-
-[ext_resource path="res://Lecture.1.gd" type="Script" id=1]
-
-[node name="Test" type="Node2D"]
-script = ExtResource( 1 )
diff --git a/Editor/Lecture.2.tscn b/Editor/Lecture.2.tscn
deleted file mode 100644
index aaf5a19..0000000
--- a/Editor/Lecture.2.tscn
+++ /dev/null
@@ -1,6 +0,0 @@
-[gd_scene load_steps=2 format=2]
-
-[ext_resource path="res://Lecture.2.gd" type="Script" id=1]
-
-[node name="Test" type="Node2D"]
-script = ExtResource( 1 )
diff --git a/Editor/Lecture.3.tscn b/Editor/Lecture.3.tscn
deleted file mode 100644
index ae2465e..0000000
--- a/Editor/Lecture.3.tscn
+++ /dev/null
@@ -1,8 +0,0 @@
-[gd_scene load_steps=2 format=2]
-
-[ext_resource path="res://Lecture.3..gd" type="Script" id=1]
-
-[node name="Control" type="Control"]
-anchor_right = 1.0
-anchor_bottom = 1.0
-script = ExtResource( 1 )
diff --git a/Editor/Lecture.1.gd b/Editor/Lectures/Lecture.1.gd
similarity index 100%
rename from Editor/Lecture.1.gd
rename to Editor/Lectures/Lecture.1.gd
diff --git a/Editor/Lecture.2.gd b/Editor/Lectures/Lecture.2.gd
similarity index 95%
rename from Editor/Lecture.2.gd
rename to Editor/Lectures/Lecture.2.gd
index 43e4c1a..fd65dcd 100644
--- a/Editor/Lecture.2.gd
+++ b/Editor/Lectures/Lecture.2.gd
@@ -1,7 +1,5 @@
 extends Node
 
-const JsonBeautifier = preload("res://ThirdParty/json_beautifier.gd")
-
 # This closesly follows the source provided in the lectures.
 # Later on after the lectures are complete or when I deem
 # Necessary there will be heavy refactors.
@@ -194,14 +192,14 @@ func _ready():
 	GTokenizer.init(ProgramDescription)
 	var ast = GParser.parse(GTokenizer)
 
-	print(JsonBeautifier.beautify_json(to_json(ast.toDict())))
+	print(JSON.print(ast.toDict(), "\t"))
 
 	# String Test
 	ProgramDescription = "\"hello\""
 	GTokenizer.init(ProgramDescription)
 
 	ast = GParser.parse(GTokenizer)
-	print(JsonBeautifier.beautify_json(to_json(ast.toDict())))
+	print(JSON.print(ast.toDict(), "\t"))
 
 
 # Called every frame. 'delta' is the elapsed time since the previous frame.
diff --git a/Editor/Lecture.3..gd b/Editor/Lectures/Lecture.3..gd
similarity index 97%
rename from Editor/Lecture.3..gd
rename to Editor/Lectures/Lecture.3..gd
index ba2b549..faed3bd 100644
--- a/Editor/Lecture.3..gd
+++ b/Editor/Lectures/Lecture.3..gd
@@ -1,7 +1,5 @@
 extends Node
 
-const JsonBeautifier = preload("res://ThirdParty/json_beautifier.gd")
-
 # This closesly follows the source provided in the lectures.
 # Later on after the lectures are complete or when I deem
 # Necessary there will be heavy refactors.
@@ -208,7 +206,7 @@ func test():
 
 	var ast = GParser.parse(GTokenizer)
 
-	print(JsonBeautifier.beautify_json(to_json(ast.toDict())))
+	print(JSON.print(ast.toDict(), "\t"))
 
 
 # Main Entry point.
diff --git a/Editor/Lectures/Lecture.4.gd b/Editor/Lectures/Lecture.4.gd
new file mode 100644
index 0000000..21278fa
--- /dev/null
+++ b/Editor/Lectures/Lecture.4.gd
@@ -0,0 +1,307 @@
+extends Node
+
+# This closely follows the source provided in the lectures.
+# Later on, after the lectures are complete or when I deem
+# it necessary, there will be heavy refactors.
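+
+# A rough usage sketch of what follows (my own note, not lecture source;
+# "42;" is a hypothetical inline program, not one of the test files):
+#
+#	GTokenizer.init("42;")
+#	var ast = GParser.parse(GTokenizer)
+#	print(JSON.print(ast.toDict(), "\t"))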
+
+const TokenType = \
+{
+	Program = "Program",
+
+	# Comments
+	CommentLine = "CommentLine",
+	CommentMultiLine = "CommentMultiLine",
+
+	# Formatting
+	Whitespace = "Whitespace",
+
+	# Statements
+	StatementEnd = "StatementEnd",
+
+	# Literals
+	Number = "Number",
+	String = "String"
+}
+
+const TokenSpec = \
+{
+	TokenType.CommentLine : "^\/\/.*",
+	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
+	TokenType.Whitespace : "^\\s+",
+	TokenType.Number : "\\d+",
+	TokenType.String : "^\"[^\"]*\"",
+	TokenType.StatementEnd : "^;"
+}
+
+class Token:
+	var Type : String
+	var Value : String
+
+	func toDict():
+		var result = \
+		{
+			Type = self.Type,
+			Value = self.Value
+		}
+		return result
+
+class Tokenizer:
+	var SrcTxt : String
+	var Cursor : int;
+
+	# Sets up the tokenizer with the program source text.
+	func init(programSrcText):
+		SrcTxt = programSrcText
+		Cursor = 0
+
+	# Provides the next token in the source text.
+	func next_Token():
+		if self.reached_EndOfTxt() == true :
+			return null
+
+		var srcLeft = self.SrcTxt.substr(Cursor)
+		var regex = RegEx.new()
+		var token = Token.new()
+
+		for type in TokenSpec :
+			regex.compile(TokenSpec[type])
+
+			var result = regex.search(srcLeft)
+			if result == null || result.get_start() != 0 :
+				continue
+
+			# Skip Comments
+			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
+				self.Cursor += result.get_string().length()
+				return next_Token()
+
+			# Skip Whitespace
+			if type == TokenType.Whitespace :
+				var addVal = result.get_string().length()
+				self.Cursor += addVal
+
+				return next_Token()
+
+			token.Type = type
+			token.Value = result.get_string()
+			self.Cursor += ( result.get_string().length() )
+
+			return token
+
+		var assertStrTmplt = "next_token: Source text not understood by tokenizer at Cursor pos: {value}"
+		var assertStr = assertStrTmplt.format({"value" : self.Cursor})
+		assert(false, assertStr)
+		return null
+
+	func reached_EndOfTxt():
+		return self.Cursor >= ( self.SrcTxt.length() - 1 )
+
+var GTokenizer = Tokenizer.new()
+
+
+const SyntaxNodeType = \
+{
+	NumericLiteral = "NumericLiteral",
+	StringLiteral = "StringLiteral",
+	ExpressionStatement = "ExpressionStatement"
+}
+
+class SyntaxNode:
+	var Type : String
+	var Value # Not specifying a type implicitly declares a Variant type.
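+
+	# Example of the toDict() shape (my note): a numeric literal node
+	# becomes { Type = "NumericLiteral", Value = 42 }; array values are
+	# serialized into an index-keyed dictionary of child dictionaries.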
+
+	func toDict():
+		var ValueDict = self.Value
+		if typeof(Value) == TYPE_ARRAY :
+			var dict = {}
+			var index = 0
+			for entry in self.Value :
+				dict[index] = entry.toDict()
+				index += 1
+
+			ValueDict = dict
+
+		var result = \
+		{
+			Type = self.Type,
+			Value = ValueDict
+		}
+		return result
+
+class ProgramNode:
+	var Type : String
+	var Body : Object
+
+	func toDict():
+		var result = \
+		{
+			Type = self.Type,
+			Body = self.Body.toDict()
+		}
+		return result
+
+class Parser:
+	var TokenizerRef : Tokenizer
+	var NextToken : Token
+
+	func eat(tokenType):
+		var currToken = self.NextToken
+
+		assert(currToken != null, "eat: NextToken was null")
+
+		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
+		var assertStr = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})
+
+		assert(currToken.Type == tokenType, assertStr)
+
+		self.NextToken = self.TokenizerRef.next_Token()
+
+		return currToken
+
+	# Literal
+	#	: NumericLiteral
+	#	: StringLiteral
+	#	;
+	#
+	func parse_Literal():
+		match NextToken.Type :
+			TokenType.Number:
+				return parse_NumericLiteral()
+			TokenType.String:
+				return parse_StringLiteral()
+
+		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")
+
+	# NumericLiteral
+	#	: Number
+	#	;
+	#
+	func parse_NumericLiteral():
+		var Token = eat(TokenType.Number)
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.NumericLiteral
+		node.Value = int( Token.Value )
+
+		return node
+
+	# StringLiteral
+	#	: String
+	#	;
+	#
+	func parse_StringLiteral():
+		var Token = eat(TokenType.String)
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.StringLiteral
+		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )
+
+		return node
+
+	# Expression
+	#	: Literal
+	#	;
+	#
+	func parse_Expression():
+		return parse_Literal()
+
+	# ExpressionStatement
+	#	: Expression
+	#	;
+	#
+	func parse_ExpressionStatement():
+		var expression = parse_Expression()
+		eat(TokenType.StatementEnd)
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.ExpressionStatement
+		node.Value = expression
+
+		return node
+
+	# Statement
+	#	: ExpressionStatement
+	#	;
+	#
+	func parse_Statement():
+		return parse_ExpressionStatement()
+
+	# StatementList
+	#	: Statement
+	#	| StatementList Statement -> Statement ...
+	#	;
+	#
+	func parse_StatementList():
+		var statementList = [ parse_Statement() ]
+
+		while NextToken != null :
+			statementList.append( parse_Statement() )
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = "StatementList"
+		node.Value = statementList
+
+		return node
+
+	# Program
+	#	: StatementList
+	#	: Literal
+	#	;
+	#
+	func parse_Program():
+		var \
+		node = ProgramNode.new()
+		node.Type = TokenType.Program
+		node.Body = parse_StatementList()
+
+		return node
+
+	# Parses the text program description into an AST.
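+	# Flow note (mine): parse() primes NextToken with one token of lookahead,
+	# then descends recursively from parse_Program() down to the literals.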
+	func parse(TokenizerRef):
+		self.TokenizerRef = TokenizerRef
+
+		NextToken = TokenizerRef.next_Token()
+
+		return parse_Program()
+
+var GParser = Parser.new()
+
+const Tests = \
+{
+	MultiStatement = \
+	{
+		Name = "Multi-Statement",
+		File = "1.Multi-Statement.uf"
+	}
+}
+
+func test(entry):
+	var introMessage = "Testing: {Name}"
+	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
+	print(introMessageFormatted)
+
+	var path = "res://Tests/{TestName}"
+	var pathFormatted = path.format({"TestName" : entry.File})
+
+	var \
+	file = File.new()
+	file.open(pathFormatted, File.READ)
+
+	var programDescription = file.get_as_text()
+	file.close()
+
+	GTokenizer.init(programDescription)
+	var ast = GParser.parse(GTokenizer)
+
+	var json = JSON.print(ast.toDict(), "\t")
+
+	print(json)
+	print("Passed!\n")
+
+
+# Main Entry point.
+func _ready():
+	for Key in Tests :
+		test(Tests[Key])
diff --git a/Editor/Lectures/Lecture.5.gd b/Editor/Lectures/Lecture.5.gd
new file mode 100644
index 0000000..4284ff5
--- /dev/null
+++ b/Editor/Lectures/Lecture.5.gd
@@ -0,0 +1,373 @@
+extends Node
+
+# This closely follows the source provided in the lectures.
+# Later on, after the lectures are complete or when I deem
+# it necessary, there will be heavy refactors.
+
+const TokenType = \
+{
+	Program = "Program",
+
+	# Comments
+	CommentLine = "CommentLine",
+	CommentMultiLine = "CommentMultiLine",
+
+	# Formatting
+	Whitespace = "Whitespace",
+
+	# Statements
+	StatementEnd = "StatementEnd",
+	StmtBlockStart = "BlockStatementStart",
+	StmtBlockEnd = "BlockStatementEnd",
+
+	# Literals
+	Number = "Number",
+	String = "String"
+}
+
+const TokenSpec = \
+{
+	TokenType.CommentLine : "^\/\/.*",
+	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
+	TokenType.Whitespace : "^\\s+",
+	TokenType.Number : "\\d+",
+	TokenType.String : "^\"[^\"]*\"",
+	TokenType.StatementEnd : "^;",
+	TokenType.StmtBlockStart : "^{",
+	TokenType.StmtBlockEnd : "^}"
+}
+
+class Token:
+	var Type : String
+	var Value : String
+
+	func toDict():
+		var result = \
+		{
+			Type = self.Type,
+			Value = self.Value
+		}
+		return result
+
+class Tokenizer:
+	var SrcTxt : String
+	var Cursor : int;
+
+	# Sets up the tokenizer with the program source text.
+	func init(programSrcText):
+		SrcTxt = programSrcText
+		Cursor = 0
+
+	# Provides the next token in the source text.
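+	# Note (mine): each TokenSpec regex is tried against the remaining source;
+	# comment and whitespace matches advance the cursor and recurse, any other
+	# match is returned as a Token, and no match at all trips the assert below.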
+	func next_Token():
+		if self.reached_EndOfTxt() == true :
+			return null
+
+		var srcLeft = self.SrcTxt.substr(Cursor)
+		var regex = RegEx.new()
+		var token = Token.new()
+
+		for type in TokenSpec :
+			regex.compile(TokenSpec[type])
+
+			var result = regex.search(srcLeft)
+			if result == null || result.get_start() != 0 :
+				continue
+
+			# Skip Comments
+			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
+				self.Cursor += result.get_string().length()
+				return next_Token()
+
+			# Skip Whitespace
+			if type == TokenType.Whitespace :
+				var addVal = result.get_string().length()
+				self.Cursor += addVal
+
+				return next_Token()
+
+			token.Type = type
+			token.Value = result.get_string()
+			self.Cursor += ( result.get_string().length() )
+
+			return token
+
+		var assertStrTmplt = "next_token: Source text not understood by tokenizer at Cursor pos: {value}"
+		var assertStr = assertStrTmplt.format({"value" : self.Cursor})
+		assert(false, assertStr)
+		return null
+
+	func reached_EndOfTxt():
+		return self.Cursor >= ( self.SrcTxt.length() )
+
+var GTokenizer = Tokenizer.new()
+
+
+const SyntaxNodeType = \
+{
+	NumericLiteral = "NumericLiteral",
+	StringLiteral = "StringLiteral",
+	ExpressionStatement = "ExpressionStatement",
+	BlockStatement = "BlockStatement",
+	EmptyStatement = "EmptyStatement"
+}
+
+class SyntaxNode:
+	var Type : String
+	var Value # Not specifying a type implicitly declares a Variant type.
+
+	func toDict():
+		var ValueDict = self.Value
+		if typeof(Value) == TYPE_ARRAY :
+			var dict = {}
+			var index = 0
+			for entry in self.Value :
+				dict[index] = entry.toDict()
+				index += 1
+
+			ValueDict = dict
+
+		if typeof(Value) == TYPE_OBJECT :
+			var result = \
+			{
+				Type = self.Type,
+				Value = self.Value.toDict()
+			}
+			return result
+
+		var result = \
+		{
+			Type = self.Type,
+			Value = ValueDict
+		}
+		return result
+
+class ProgramNode:
+	var Type : String
+	var Body : Object
+
+	func toDict():
+		var result = \
+		{
+			Type = self.Type,
+			Body = self.Body.toDict()
+		}
+		return result
+
+class Parser:
+	var TokenizerRef : Tokenizer
+	var NextToken : Token
+
+	func eat(tokenType):
+		var currToken = self.NextToken
+
+		assert(currToken != null, "eat: NextToken was null")
+
+		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
+		var assertStr = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})
+
+		assert(currToken.Type == tokenType, assertStr)
+
+		self.NextToken = self.TokenizerRef.next_Token()
+
+		return currToken
+
+	# Literal
+	#	: NumericLiteral
+	#	: StringLiteral
+	#	;
+	#
+	func parse_Literal():
+		match NextToken.Type :
+			TokenType.Number:
+				return parse_NumericLiteral()
+			TokenType.String:
+				return parse_StringLiteral()
+
+		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")
+
+	# NumericLiteral
+	#	: Number
+	#	;
+	#
+	func parse_NumericLiteral():
+		var Token = eat(TokenType.Number)
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.NumericLiteral
+		node.Value = int( Token.Value )
+
+		return node
+
+	# StringLiteral
+	#	: String
+	#	;
+	#
+	func parse_StringLiteral():
+		var Token = eat(TokenType.String)
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.StringLiteral
+		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )
+
+		return node
+
+	# Expression
+	#	: Literal
+	#	;
+	#
+	func parse_Expression():
+		return parse_Literal()
+
+	# EmptyStatement
+	#	;
+	#
+	func parse_EmptyStatement():
+		eat(TokenType.StatementEnd)
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.EmptyStatement
+
+		return node
+
+	# BlockStatement
+	#	: { OptStatementList }
+	#	;
+	#
+	func parse_BlockStatement():
+		eat(TokenType.StmtBlockStart)
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.BlockStatement
+
+		if NextToken.Type != TokenType.StmtBlockEnd :
+			node.Value = parse_StatementList(TokenType.StmtBlockEnd)
+		else :
+			node.Value = []
+
+		eat(TokenType.StmtBlockEnd)
+
+		return node
+
+	# ExpressionStatement
+	#	: Expression
+	#	;
+	#
+	func parse_ExpressionStatement():
+		var expression = parse_Expression()
+		eat(TokenType.StatementEnd)
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.ExpressionStatement
+		node.Value = expression
+
+		return node
+
+	# Statement
+	#	: ExpressionStatement
+	#	: BlockStatement
+	#	: EmptyStatement
+	#	;
+	#
+	func parse_Statement():
+		match NextToken.Type :
+			TokenType.StatementEnd :
+				return parse_EmptyStatement()
+			TokenType.StmtBlockStart :
+				return parse_BlockStatement()
+
+		return parse_ExpressionStatement()
+
+	# StatementList
+	#	: Statement
+	#	| StatementList Statement -> Statement ...
+	#	;
+	#
+	func parse_StatementList(endToken):
+		var statementList = [ parse_Statement() ]
+
+		while NextToken != null && NextToken.Type != endToken :
+			statementList.append( parse_Statement() )
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = "StatementList"
+		node.Value = statementList
+
+		return node
+
+	# Program
+	#	: StatementList
+	#	: Literal
+	#	;
+	#
+	func parse_Program():
+		var \
+		node = ProgramNode.new()
+		node.Type = TokenType.Program
+		node.Body = parse_StatementList(null)
+
+		return node
+
+	# Parses the text program description into an AST.
+	func parse(TokenizerRef):
+		self.TokenizerRef = TokenizerRef
+
+		NextToken = TokenizerRef.next_Token()
+
+		return parse_Program()
+
+var GParser = Parser.new()
+
+
+
+onready var TextOut = GScene.get_node("TextOutput")
+
+func tout(text):
+	TextOut.insert_text_at_cursor(text)
+
+const Tests = \
+{
+	MultiStatement = \
+	{
+		Name = "Multi-Statement",
+		File = "1.Multi-Statement.uf"
+	},
+	BlockStatement = \
+	{
+		Name = "Block Statement",
+		File = "2.BlockStatement.uf"
+	}
+}
+
+func test(entry):
+	var introMessage = "Testing: {Name}\n"
+	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
+	tout(introMessageFormatted)
+
+	var path = "res://Tests/{TestName}"
+	var pathFormatted = path.format({"TestName" : entry.File})
+
+	var \
+	file = File.new()
+	file.open(pathFormatted, File.READ)
+
+	var programDescription = file.get_as_text()
+	file.close()
+
+	GTokenizer.init(programDescription)
+	var ast = GParser.parse(GTokenizer)
+
+	var json = JSON.print(ast.toDict(), "\t")
+
+	tout(json + "\n")
+	tout("Passed!\n")
+
+
+# Main Entry point.
+func _ready():
+	for Key in Tests :
+		test(Tests[Key])
diff --git a/Editor/Lectures/Lecture.6.gd b/Editor/Lectures/Lecture.6.gd
new file mode 100644
index 0000000..0adf86c
--- /dev/null
+++ b/Editor/Lectures/Lecture.6.gd
@@ -0,0 +1,382 @@
+extends Node
+
+# This closely follows the source provided in the lectures.
+# Later on, after the lectures are complete or when I deem
+# it necessary, there will be heavy refactors.
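+
+# Lecture 6 swaps the dictionary dump for an S-expression dump. A sketch of
+# the expected shape (mine; "42;" is a hypothetical input, not a test file):
+#
+#	[ "Program", [ [ "ExpressionStatement", [ "NumericLiteral", 42 ] ] ] ]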
+
+const TokenType = \
+{
+	Program = "Program",
+
+	# Comments
+	CommentLine = "CommentLine",
+	CommentMultiLine = "CommentMultiLine",
+
+	# Formatting
+	Whitespace = "Whitespace",
+
+	# Statements
+	StatementEnd = "StatementEnd",
+	StmtBlockStart = "BlockStatementStart",
+	StmtBlockEnd = "BlockStatementEnd",
+
+	# Literals
+	Number = "Number",
+	String = "String"
+}
+
+const TokenSpec = \
+{
+	TokenType.CommentLine : "^\/\/.*",
+	TokenType.CommentMultiLine : "^\/\\*[\\s\\S]*?\\*\/",
+	TokenType.Whitespace : "^\\s+",
+	TokenType.Number : "\\d+",
+	TokenType.String : "^\"[^\"]*\"",
+	TokenType.StatementEnd : "^;",
+	TokenType.StmtBlockStart : "^{",
+	TokenType.StmtBlockEnd : "^}"
+}
+
+class Token:
+	var Type : String
+	var Value : String
+
+	func to_Dictionary():
+		var result = \
+		{
+			Type = self.Type,
+			Value = self.Value
+		}
+		return result
+
+class Tokenizer:
+	var SrcTxt : String
+	var Cursor : int;
+
+	# Sets up the tokenizer with the program source text.
+	func init(programSrcText):
+		SrcTxt = programSrcText
+		Cursor = 0
+
+	# Provides the next token in the source text.
+	func next_Token():
+		if self.reached_EndOfTxt() == true :
+			return null
+
+		var srcLeft = self.SrcTxt.substr(Cursor)
+		var regex = RegEx.new()
+		var token = Token.new()
+
+		for type in TokenSpec :
+			regex.compile(TokenSpec[type])
+
+			var result = regex.search(srcLeft)
+			if result == null || result.get_start() != 0 :
+				continue
+
+			# Skip Comments
+			if type == TokenType.CommentLine || type == TokenType.CommentMultiLine :
+				self.Cursor += result.get_string().length()
+				return next_Token()
+
+			# Skip Whitespace
+			if type == TokenType.Whitespace :
+				var addVal = result.get_string().length()
+				self.Cursor += addVal
+
+				return next_Token()
+
+			token.Type = type
+			token.Value = result.get_string()
+			self.Cursor += ( result.get_string().length() )
+
+			return token
+
+		var assertStrTmplt = "next_token: Source text not understood by tokenizer at Cursor pos: {value}"
+		var assertStr = assertStrTmplt.format({"value" : self.Cursor})
+		assert(false, assertStr)
+		return null
+
+	func reached_EndOfTxt():
+		return self.Cursor >= ( self.SrcTxt.length() )
+
+var GTokenizer = Tokenizer.new()
+
+
+
+const AST_Format = \
+{
+	Dictionary = "Dictionary",
+	SExpression = "S-Expression"
+}
+
+const SyntaxNodeType = \
+{
+	NumericLiteral = "NumericLiteral",
+	StringLiteral = "StringLiteral",
+	ExpressionStatement = "ExpressionStatement",
+	BlockStatement = "BlockStatement",
+	EmptyStatement = "EmptyStatement"
+}
+
+class SyntaxNode:
+	var Type : String
+	var Value # Not specifying a type implicitly declares a Variant type.
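+
+	# Serialization note (mine): to_SExpression() renders nested
+	# [ Type, Value ] lists, to_Dictionary() nested { Type, Value }
+	# dictionaries; both recurse through array and object values.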
+
+	func to_SExpression():
+		var expression = [ Type ]
+
+		if typeof(Value) == TYPE_ARRAY :
+			var array = []
+			for entry in self.Value :
+				array.append( entry.to_SExpression() )
+
+			expression.append(array)
+			return expression
+
+		if typeof(Value) == TYPE_OBJECT :
+			var result = [ Type, Value.to_SExpression() ]
+			return result
+
+		expression.append(Value)
+		return expression
+
+	func to_Dictionary():
+		if typeof(Value) == TYPE_ARRAY :
+			var array = []
+			for entry in self.Value :
+				array.append(entry.to_Dictionary())
+			var result = \
+			{
+				Type = self.Type,
+				Value = array
+			}
+			return result
+
+		if typeof(Value) == TYPE_OBJECT :
+			var result = \
+			{
+				Type = self.Type,
+				Value = self.Value.to_Dictionary()
+			}
+			return result
+
+		var result = \
+		{
+			Type = self.Type,
+			Value = self.Value
+		}
+		return result
+
+class Parser:
+	var TokenizerRef : Tokenizer
+	var NextToken : Token
+
+	func eat(tokenType):
+		var currToken = self.NextToken
+
+		assert(currToken != null, "eat: NextToken was null")
+
+		var assertStrTmplt = "eat: Unexpected token: {value}, expected: {type}"
+		var assertStr = assertStrTmplt.format({"value" : currToken.Value, "type" : tokenType})
+
+		assert(currToken.Type == tokenType, assertStr)
+
+		self.NextToken = self.TokenizerRef.next_Token()
+
+		return currToken
+
+	# Literal
+	#	: NumericLiteral
+	#	: StringLiteral
+	#	;
+	#
+	func parse_Literal():
+		match NextToken.Type :
+			TokenType.Number:
+				return parse_NumericLiteral()
+			TokenType.String:
+				return parse_StringLiteral()
+
+		assert(false, "parse_Literal: Was not able to detect valid literal type from NextToken")
+
+	# NumericLiteral
+	#	: Number
+	#	;
+	#
+	func parse_NumericLiteral():
+		var Token = eat(TokenType.Number)
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.NumericLiteral
+		node.Value = int( Token.Value )
+
+		return node
+
+	# StringLiteral
+	#	: String
+	#	;
+	#
+	func parse_StringLiteral():
+		var Token = eat(TokenType.String)
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.StringLiteral
+		node.Value = Token.Value.substr( 1, Token.Value.length() - 2 )
+
+		return node
+
+	# Expression
+	#	: Literal
+	#	;
+	#
+	func parse_Expression():
+		return parse_Literal()
+
+	# EmptyStatement
+	#	;
+	#
+	func parse_EmptyStatement():
+		eat(TokenType.StatementEnd)
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.EmptyStatement
+
+		return node
+
+	# BlockStatement
+	#	: { OptStatementList }
+	#	;
+	#
+	func parse_BlockStatement():
+		eat(TokenType.StmtBlockStart)
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.BlockStatement
+
+		if NextToken.Type != TokenType.StmtBlockEnd :
+			node.Value = parse_StatementList(TokenType.StmtBlockEnd)
+		else :
+			node.Value = []
+
+		eat(TokenType.StmtBlockEnd)
+
+		return node
+
+	# ExpressionStatement
+	#	: Expression
+	#	;
+	#
+	func parse_ExpressionStatement():
+		var expression = parse_Expression()
+		eat(TokenType.StatementEnd)
+
+		var \
+		node = SyntaxNode.new()
+		node.Type = SyntaxNodeType.ExpressionStatement
+		node.Value = expression
+
+		return node
+
+	# Statement
+	#	: ExpressionStatement
+	#	: BlockStatement
+	#	: EmptyStatement
+	#	;
+	#
+	func parse_Statement():
+		match NextToken.Type :
+			TokenType.StatementEnd :
+				return parse_EmptyStatement()
+			TokenType.StmtBlockStart :
+				return parse_BlockStatement()
+
+		return parse_ExpressionStatement()
+
+	# StatementList
+	#	: Statement
+	#	| StatementList Statement -> Statement ...
+	#	;
+	#
+	func parse_StatementList(endToken):
+		var statementList = [ parse_Statement() ]
+
+		while NextToken != null && NextToken.Type != endToken :
+			statementList.append( parse_Statement() )
+
+		return statementList
+
+	# Program
+	#	: StatementList
+	#	: Literal
+	#	;
+	#
+	func parse_Program():
+		var \
+		node = SyntaxNode.new()
+		node.Type = TokenType.Program
+		node.Value = parse_StatementList(null)
+
+		return node
+
+	# Parses the text program description into an AST.
+	func parse(TokenizerRef):
+		self.TokenizerRef = TokenizerRef
+
+		NextToken = TokenizerRef.next_Token()
+
+		return parse_Program()
+
+var GParser = Parser.new()
+
+
+
+onready var TextOut = GScene.get_node("TextOutput")
+
+func tout(text):
+	TextOut.insert_text_at_cursor(text)
+
+const Tests = \
+{
+	MultiStatement = \
+	{
+		Name = "Multi-Statement",
+		File = "1.Multi-Statement.uf"
+	},
+	BlockStatement = \
+	{
+		Name = "Block Statement",
+		File = "2.BlockStatement.uf"
+	}
+}
+
+func test(entry):
+	var introMessage = "Testing: {Name}\n"
+	var introMessageFormatted = introMessage.format({"Name" : entry.Name})
+	tout(introMessageFormatted)
+
+	var path = "res://Tests/{TestName}"
+	var pathFormatted = path.format({"TestName" : entry.File})
+
+	var \
+	file = File.new()
+	file.open(pathFormatted, File.READ)
+
+	var programDescription = file.get_as_text()
+	file.close()
+
+	GTokenizer.init(programDescription)
+	var ast = GParser.parse(GTokenizer)
+
+	var json = JSON.print(ast.to_SExpression(), '\t')
+
+	tout(json + "\n")
+	tout("Passed!\n")
+
+
+# Main Entry point.
+func _ready():
+	for Key in Tests :
+		test(Tests[Key])
diff --git a/Editor/Lectures/Lecture.tscn b/Editor/Lectures/Lecture.tscn
new file mode 100644
index 0000000..7abed12
--- /dev/null
+++ b/Editor/Lectures/Lecture.tscn
@@ -0,0 +1,27 @@
+[gd_scene load_steps=3 format=2]
+
+[ext_resource path="res://Assets/Styles/EditorTheme.tres" type="Theme" id=1]
+[ext_resource path="res://Assets/Branding/RDP_Class_cover_small.png" type="Texture" id=2]
+
+[node name="Control" type="Control"]
+anchor_right = 1.0
+anchor_bottom = 1.0
+
+[node name="CourseBrand" type="TextureRect" parent="."]
+anchor_right = 1.0
+anchor_bottom = 1.0
+rect_scale = Vector2( 0.25, 0.25 )
+texture = ExtResource( 2 )
+expand = true
+stretch_mode = 6
+
+[node name="TextOutput" type="TextEdit" parent="."]
+anchor_left = 0.25
+anchor_right = 1.0
+anchor_bottom = 1.0
+grow_horizontal = 0
+theme = ExtResource( 1 )
+readonly = true
+highlight_current_line = true
+show_line_numbers = true
+minimap_draw = true
diff --git a/Editor/Persistent.tscn b/Editor/Persistent.tscn
new file mode 100644
index 0000000..0996c02
--- /dev/null
+++ b/Editor/Persistent.tscn
@@ -0,0 +1,3 @@
+[gd_scene format=2]
+
+[node name="P-Root" type="Node"]
diff --git a/Editor/Tests/1.Multi-Statement.uf b/Editor/Tests/1.Multi-Statement.uf
new file mode 100644
index 0000000..2daaac6
--- /dev/null
+++ b/Editor/Tests/1.Multi-Statement.uf
@@ -0,0 +1,8 @@
+// Testing a comment
+"Hello World!";
+
+/**
+*
+* Testing a comment
+*/
+42;
diff --git a/Editor/Tests/2.BlockStatement.uf b/Editor/Tests/2.BlockStatement.uf
new file mode 100644
index 0000000..cec4943
--- /dev/null
+++ b/Editor/Tests/2.BlockStatement.uf
@@ -0,0 +1,18 @@
+{
+	42;
+	"Hello World!";
+}
+
+{
+
+}
+
+{
+	42;
+	{
+		"Hello World!";
+	}
+	{
+		;
+	}
+}
\ No newline at end of file
diff --git a/Editor/Tests/3.BinaryExpression.uf b/Editor/Tests/3.BinaryExpression.uf
new file mode 100644
index 0000000..dfc7ebc
--- /dev/null
+++ b/Editor/Tests/3.BinaryExpression.uf
@@ -0,0 +1,7 @@
+// Binary expression
+2 + 2;
+
+// Nested binary expressions:
+//	left  : 3 + 2
+//	right : 2
+3 + 2 - 2;
\ No newline at end of file
diff --git a/Editor/Tests/4.Assignment.uf b/Editor/Tests/4.Assignment.uf
new file mode 100644
index 0000000..bebb747
--- /dev/null
+++ b/Editor/Tests/4.Assignment.uf
@@ -0,0 +1,5 @@
+// Single
+x = 42;
+
+// Chained
+x = (y = 42);
\ No newline at end of file
diff --git a/Editor/Tests/5.Conditionals.uf b/Editor/Tests/5.Conditionals.uf
new file mode 100644
index 0000000..ec3e854
--- /dev/null
+++ b/Editor/Tests/5.Conditionals.uf
@@ -0,0 +1,8 @@
+if (x)
+{
+	x = 1;
+}
+else
+{
+	x = 2;
+}
\ No newline at end of file
diff --git a/Editor/Tests/6.Relations.uf b/Editor/Tests/6.Relations.uf
new file mode 100644
index 0000000..3b59d28
--- /dev/null
+++ b/Editor/Tests/6.Relations.uf
@@ -0,0 +1 @@
+x > 0;
\ No newline at end of file
diff --git a/Editor/Tests/7.VariableDeclaration.uf b/Editor/Tests/7.VariableDeclaration.uf
new file mode 100644
index 0000000..d056e89
--- /dev/null
+++ b/Editor/Tests/7.VariableDeclaration.uf
@@ -0,0 +1,8 @@
+// Declaration
+let x;
+
+// Single assignment declaration
+let y = 42;
+
+// Multiple declarations
+let a, b;
\ No newline at end of file
diff --git a/Editor/ThirdParty/json_beautifier.gd b/Editor/ThirdParty/json_beautifier.gd
deleted file mode 100644
index 71f642c..0000000
--- a/Editor/ThirdParty/json_beautifier.gd
+++ /dev/null
@@ -1,133 +0,0 @@
-###############################################################################
-# JSON Beautifier                                                             #
-# Copyright (C) 2018-2020 Michael Alexsander                                  #
-#-----------------------------------------------------------------------------#
-# This Source Code Form is subject to the terms of the Mozilla Public         #
-# License, v. 2.0. If a copy of the MPL was not distributed with this         #
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.                    #
-###############################################################################
-
-class_name JSONBeautifier
-
-
-# Takes valid JSON (if invalid, it will return a error according with Godot's
-# 'validade_json()' method) and a number of spaces for indentation (default is
-# '0', in which it will use tabs instead), returning properly formatted JSON.
-static func beautify_json(json: String, spaces := 0) -> String:
-	var error_message: String = validate_json(json)
-	if not error_message.empty():
-		return error_message
-
-	var indentation := ""
-	if spaces > 0:
-		for i in spaces:
-			indentation += " "
-	else:
-		indentation = "\t"
-
-	var quotation_start := -1
-	var char_position := 0
-	for i in json:
-		# Workaround a Godot quirk, as it allows JSON strings to end with a
-		# trailing comma.
-		if i == "," and char_position + 1 == json.length():
-			break
-
-		# Avoid formating inside strings.
-		if i == "\"":
-			if quotation_start == -1:
-				quotation_start = char_position
-			elif json[char_position - 1] != "\\":
-				quotation_start = -1
-
-			char_position += 1
-
-			continue
-		elif quotation_start != -1:
-			char_position += 1
-
-			continue
-
-		match i:
-			# Remove pre-existing formatting.
-			" ", "\n", "\t":
-				json[char_position] = ""
-				char_position -= 1
-			"{", "[", ",":
-				if json[char_position + 1] != "}" and\
-						json[char_position + 1] != "]":
-					json = json.insert(char_position + 1, "\n")
-					char_position += 1
-			"}", "]":
-				if json[char_position - 1] != "{" and\
-						json[char_position - 1] != "[":
-					json = json.insert(char_position, "\n")
-					char_position += 1
-			":":
-				json = json.insert(char_position + 1, " ")
-				char_position += 1
-
-		char_position += 1
-
-	for i in [["{", "}"], ["[", "]"]]:
-		var bracket_start: int = json.find(i[0])
-		while bracket_start != -1:
-			var bracket_end: int = json.find("\n", bracket_start)
-			var bracket_count := 0
-			while bracket_end != - 1:
-				if json[bracket_end - 1] == i[0]:
-					bracket_count += 1
-				elif json[bracket_end + 1] == i[1]:
-					bracket_count -= 1
-
-				# Move through the indentation to see if there is a match.
-				while json[bracket_end + 1] == indentation[0]:
-					bracket_end += 1
-
-					if json[bracket_end + 1] == i[1]:
-						bracket_count -= 1
-
-				if bracket_count <= 0:
-					break
-
-				bracket_end = json.find("\n", bracket_end + 1)
-
-			# Skip one newline so the end bracket doesn't get indented.
-			bracket_end = json.rfind("\n", json.rfind("\n", bracket_end) - 1)
-			while bracket_end > bracket_start:
-				json = json.insert(bracket_end + 1, indentation)
-				bracket_end = json.rfind("\n", bracket_end - 1)
-
-			bracket_start = json.find(i[0], bracket_start + 1)
-
-	return json
-
-
-# Takes valid JSON (if invalid, it will return a error according with Godot's
-# 'validade_json()' method), returning JSON in a single line.
-static func uglify_json(json: String) -> String:
-	var quotation_start := -1
-	var char_position := 0
-	for i in json:
-		# Avoid formating inside strings.
-		if i == "\"":
-			if quotation_start == -1:
-				quotation_start = char_position
-			elif json[char_position - 1] != "\\":
-				quotation_start = -1
-
-			char_position += 1
-
-			continue
-		elif quotation_start != -1:
-			char_position += 1
-
-			continue
-
-		if i == " " or i == "\n" or i == "\t":
-			json[char_position] = ""
-			char_position -= 1
-
-		char_position += 1
-
-	return json
diff --git a/Editor/project.godot b/Editor/project.godot
index 4b785e0..c2dfcc3 100644
--- a/Editor/project.godot
+++ b/Editor/project.godot
@@ -8,22 +8,17 @@
 
 config_version=4
 
-_global_script_classes=[ {
-"base": "Reference",
-"class": "JSONBeautifier",
-"language": "GDScript",
-"path": "res://ThirdParty/json_beautifier.gd"
-} ]
-_global_script_class_icons={
-"JSONBeautifier": ""
-}
-
 [application]
 
 config/name="Parser"
-run/main_scene="res://Lecture.3.tscn"
-boot_splash/image="res://Branding/RDP_Class_cover_small.png"
-config/icon="res://Branding/RDP_Class_cover_small.png"
+run/main_scene="res://Persistent.tscn"
+boot_splash/image="res://Assets/Branding/RDP_Class_cover_small.png"
+config/icon="res://Assets/Branding/RDP_Class_cover_small.png"
+
+[autoload]
+
+GScene="*res://Lectures/Lecture.tscn"
+GScript="*res://Lectures/Lecture.6.gd"
 
 [gui]
 
diff --git a/build_engine.bat b/build_engine.bat
new file mode 100644
index 0000000..2872469
--- /dev/null
+++ b/build_engine.bat
@@ -0,0 +1,5 @@
+cd Engine\gd\
+
+scons -j%NUMBER_OF_PROCESSORS% platform=windows
+
+exit
\ No newline at end of file
diff --git a/editor.bat b/editor.bat
index 7c9c3c4..e9aa649 100644
--- a/editor.bat
+++ b/editor.bat
@@ -1,5 +1,3 @@
-cd Engine\gd\bin\
+start Engine\gd\bin\godot.windows.tools.64.exe -e Editor/project.godot
 
-start godot.windows.tools.64.exe
 
-exit
\ No newline at end of file
diff --git a/logs/godot.log b/logs/godot.log
new file mode 100644
index 0000000..442a41c
--- /dev/null
+++ b/logs/godot.log
@@ -0,0 +1,12 @@
+Godot Engine v3.3.4.stable.official.faf3f883d - https://godotengine.org
+OpenGL ES 3.0 Renderer: NVIDIA GeForce GTX 1080/PCIe/SSE2
+OpenGL ES Batching: ON
+
+**ERROR**: Condition "err" is true. Returned: err
+   At: modules/gdscript/gdscript.cpp:815:load_source_code() - Condition "err" is true. Returned: err
+**ERROR**: Cannot load source code from file 'C:/Projects/Study/LangStudies/Editor/Lecture.3.gd'.
+   At: modules/gdscript/gdscript.cpp:2241:load() - Condition "err != OK" is true. Returned: RES()
+**ERROR**: Failed loading resource: C:/Projects/Study/LangStudies/Editor/Lecture.3.gd. Make sure resources have been imported by opening the project in the editor at least once.
+   At: core/io/resource_loader.cpp:282:_load() - Condition "found" is true. Returned: RES()
+**ERROR**: Can't load script: C:/Projects/Study/LangStudies/Editor/Lecture.3.gd
+   At: main/main.cpp:1658:start() - Condition "script_res.is_null()" is true. Returned: false
diff --git a/logs/godot_2022-07-07_21.35.04.log b/logs/godot_2022-07-07_21.35.04.log
new file mode 100644
index 0000000..4cda1f2
--- /dev/null
+++ b/logs/godot_2022-07-07_21.35.04.log
@@ -0,0 +1,7 @@
+Godot Engine v3.3.4.stable.official.faf3f883d - https://godotengine.org
+OpenGL ES 3.0 Renderer: NVIDIA GeForce GTX 1080/PCIe/SSE2
+OpenGL ES Batching: ON
+
+Project is missing: C:/Projects/Study/Parsing/Editor/project.godot
+**ERROR**: Condition "default_certs != __null" is true.
+   At: modules/mbedtls/crypto_mbedtls.cpp:201:load_default_certificates() - Condition "default_certs != __null" is true.
diff --git a/logs/godot_2022-07-07_21.35.40.log b/logs/godot_2022-07-07_21.35.40.log
new file mode 100644
index 0000000..442a41c
--- /dev/null
+++ b/logs/godot_2022-07-07_21.35.40.log
@@ -0,0 +1,12 @@
+Godot Engine v3.3.4.stable.official.faf3f883d - https://godotengine.org
+OpenGL ES 3.0 Renderer: NVIDIA GeForce GTX 1080/PCIe/SSE2
+OpenGL ES Batching: ON
+
+**ERROR**: Condition "err" is true. Returned: err
+   At: modules/gdscript/gdscript.cpp:815:load_source_code() - Condition "err" is true. Returned: err
+**ERROR**: Cannot load source code from file 'C:/Projects/Study/LangStudies/Editor/Lecture.3.gd'.
+   At: modules/gdscript/gdscript.cpp:2241:load() - Condition "err != OK" is true. Returned: RES()
+**ERROR**: Failed loading resource: C:/Projects/Study/LangStudies/Editor/Lecture.3.gd. Make sure resources have been imported by opening the project in the editor at least once.
+   At: core/io/resource_loader.cpp:282:_load() - Condition "found" is true. Returned: RES()
+**ERROR**: Can't load script: C:/Projects/Study/LangStudies/Editor/Lecture.3.gd
+   At: main/main.cpp:1658:start() - Condition "script_res.is_null()" is true. Returned: false