 package.json           |   2 +-
 spec/parser.js         | 170 ++++++++++++++++
 spec/parser_spec.rb    | 426 ------------------
 spec/tokenizer.js      | 317 ++++++++++++++++
 spec/tokenizer_spec.rb | 322 ------------------
 5 files changed, 488 insertions(+), 749 deletions(-)
diff --git a/package.json b/package.json
index b469398..e647791 100644
--- a/package.json
+++ b/package.json
@@ -29,7 +29,7 @@
"handlebars": "bin/handlebars"
},
"scripts": {
- "test": "node_modules/.bin/mocha -u qunit spec/qunit_spec.js"
+ "test": "node_modules/.bin/mocha spec/tokenizer.js spec/parser.js && node_modules/.bin/mocha -u qunit spec/qunit_spec.js"
},
"optionalDependencies": {}
}
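
Note on the script change above: `&&` makes the run fail fast, so the legacy qunit suite only executes once the new mocha specs pass, and only the qunit_spec.js invocation needs the `-u qunit` flag — the new files below use mocha's default BDD interface. A minimal sketch of that shape (hypothetical standalone example, not part of the commit):

    // sketch.js -- mocha's default BDD UI with should-style assertions,
    // the style spec/parser.js and spec/tokenizer.js follow below
    require('should');

    describe('example', function() {
      it('asserts with should', function() {
        (1 + 1).should.equal(2);
      });
    });
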
diff --git a/spec/parser.js b/spec/parser.js
new file mode 100644
index 0000000..6fb8d9a
--- /dev/null
+++ b/spec/parser.js
@@ -0,0 +1,170 @@
+var Handlebars = require('../lib/handlebars');
+
+require('should');
+
+describe('parser', function() {
+ function ast_for(template) {
+ var ast = Handlebars.parse(template);
+ return Handlebars.print(ast);
+ }
+
+ it('parses simple mustaches', function() {
+ ast_for('{{foo}}').should.equal("{{ ID:foo [] }}\n");
+ ast_for('{{foo?}}').should.equal("{{ ID:foo? [] }}\n");
+ ast_for('{{foo_}}').should.equal("{{ ID:foo_ [] }}\n");
+ ast_for('{{foo-}}').should.equal("{{ ID:foo- [] }}\n");
+ ast_for('{{foo:}}').should.equal("{{ ID:foo: [] }}\n");
+ });
+
+ it('parses simple mustaches with data', function() {
+ ast_for("{{@foo}}").should.equal("{{ @ID:foo [] }}\n");
+ });
+
+ it('parses mustaches with paths', function() {
+ ast_for("{{foo/bar}}").should.equal("{{ PATH:foo/bar [] }}\n");
+ });
+
+ it('parses mustaches with this/foo', function() {
+ ast_for("{{this/foo}}").should.equal("{{ ID:foo [] }}\n");
+ });
+
+ it('parses mustaches with - in a path', function() {
+ ast_for("{{foo-bar}}").should.equal("{{ ID:foo-bar [] }}\n");
+ });
+
+ it('parses mustaches with parameters', function() {
+ ast_for("{{foo bar}}").should.equal("{{ ID:foo [ID:bar] }}\n");
+ });
+
+ it('parses mustaches with string parameters', function() {
+ ast_for("{{foo bar \"baz\" }}").should.equal('{{ ID:foo [ID:bar, "baz"] }}\n');
+ });
+
+ it('parses mustaches with INTEGER parameters', function() {
+ ast_for("{{foo 1}}").should.equal("{{ ID:foo [INTEGER{1}] }}\n");
+ });
+
+ it('parses mustaches with BOOLEAN parameters', function() {
+ ast_for("{{foo true}}").should.equal("{{ ID:foo [BOOLEAN{true}] }}\n");
+ ast_for("{{foo false}}").should.equal("{{ ID:foo [BOOLEAN{false}] }}\n");
+ });
+
+ it('parses mustaches with DATA parameters', function() {
+ ast_for("{{foo @bar}}").should.equal("{{ ID:foo [@ID:bar] }}\n");
+ });
+
+ it('parses mustaches with hash arguments', function() {
+ ast_for("{{foo bar=baz}}").should.equal("{{ ID:foo [] HASH{bar=ID:baz} }}\n");
+ ast_for("{{foo bar=1}}").should.equal("{{ ID:foo [] HASH{bar=INTEGER{1}} }}\n");
+ ast_for("{{foo bar=true}}").should.equal("{{ ID:foo [] HASH{bar=BOOLEAN{true}} }}\n");
+ ast_for("{{foo bar=false}}").should.equal("{{ ID:foo [] HASH{bar=BOOLEAN{false}} }}\n");
+ ast_for("{{foo bar=@baz}}").should.equal("{{ ID:foo [] HASH{bar=@ID:baz} }}\n");
+
+ ast_for("{{foo bar=baz bat=bam}}").should.equal("{{ ID:foo [] HASH{bar=ID:baz, bat=ID:bam} }}\n");
+ ast_for("{{foo bar=baz bat=\"bam\"}}").should.equal('{{ ID:foo [] HASH{bar=ID:baz, bat="bam"} }}\n');
+
+ ast_for("{{foo bat='bam'}}").should.equal('{{ ID:foo [] HASH{bat="bam"} }}\n');
+
+ ast_for("{{foo omg bar=baz bat=\"bam\"}}").should.equal('{{ ID:foo [ID:omg] HASH{bar=ID:baz, bat="bam"} }}\n');
+ ast_for("{{foo omg bar=baz bat=\"bam\" baz=1}}").should.equal('{{ ID:foo [ID:omg] HASH{bar=ID:baz, bat="bam", baz=INTEGER{1}} }}\n');
+ ast_for("{{foo omg bar=baz bat=\"bam\" baz=true}}").should.equal('{{ ID:foo [ID:omg] HASH{bar=ID:baz, bat="bam", baz=BOOLEAN{true}} }}\n');
+ ast_for("{{foo omg bar=baz bat=\"bam\" baz=false}}").should.equal('{{ ID:foo [ID:omg] HASH{bar=ID:baz, bat="bam", baz=BOOLEAN{false}} }}\n');
+ });
+
+ it('parses contents followed by a mustache', function() {
+ ast_for("foo bar {{baz}}").should.equal("CONTENT[ \'foo bar \' ]\n{{ ID:baz [] }}\n");
+ });
+
+ it('parses a partial', function() {
+ ast_for("{{> foo }}").should.equal("{{> PARTIAL:foo }}\n");
+ });
+
+ it('parses a partial with context', function() {
+ ast_for("{{> foo bar}}").should.equal("{{> PARTIAL:foo ID:bar }}\n");
+ });
+
+ it('parses a partial with a complex name', function() {
+ ast_for("{{> shared/partial?.bar}}").should.equal("{{> PARTIAL:shared/partial?.bar }}\n");
+ });
+
+ it('parses a comment', function() {
+ ast_for("{{! this is a comment }}").should.equal("{{! ' this is a comment ' }}\n");
+ });
+
+ it('parses a multi-line comment', function() {
+ ast_for("{{!\nthis is a multi-line comment\n}}").should.equal("{{! \'\nthis is a multi-line comment\n\' }}\n");
+ });
+
+ it('parses an inverse section', function() {
+ ast_for("{{#foo}} bar {{^}} baz {{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n CONTENT[ ' bar ' ]\n {{^}}\n CONTENT[ ' baz ' ]\n");
+ });
+
+ it('parses an inverse (else-style) section', function() {
+ ast_for("{{#foo}} bar {{else}} baz {{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n CONTENT[ ' bar ' ]\n {{^}}\n CONTENT[ ' baz ' ]\n");
+ });
+
+ it('parses empty blocks', function() {
+ ast_for("{{#foo}}{{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n");
+ });
+
+ it('parses empty blocks with empty inverse section', function() {
+ ast_for("{{#foo}}{{^}}{{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n {{^}}\n");
+ });
+
+ it('parses empty blocks with empty inverse (else-style) section', function() {
+ ast_for("{{#foo}}{{else}}{{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n {{^}}\n");
+ });
+
+ it('parses non-empty blocks with empty inverse section', function() {
+ ast_for("{{#foo}} bar {{^}}{{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n CONTENT[ ' bar ' ]\n {{^}}\n");
+ });
+
+ it('parses non-empty blocks with empty inverse (else-style) section', function() {
+ ast_for("{{#foo}} bar {{else}}{{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n CONTENT[ ' bar ' ]\n {{^}}\n");
+ });
+
+ it('parses empty blocks with non-empty inverse section', function() {
+ ast_for("{{#foo}}{{^}} bar {{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n {{^}}\n CONTENT[ ' bar ' ]\n");
+ });
+
+ it('parses empty blocks with non-empty inverse (else-style) section', function() {
+ ast_for("{{#foo}}{{else}} bar {{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n PROGRAM:\n {{^}}\n CONTENT[ ' bar ' ]\n");
+ });
+
+ it('parses a standalone inverse section', function() {
+ ast_for("{{^foo}}bar{{/foo}}").should.equal("BLOCK:\n {{ ID:foo [] }}\n {{^}}\n CONTENT[ 'bar' ]\n");
+ });
+
+ it("raises if there's a Parse error", function() {
+ (function() {
+ ast_for("{{foo}");
+ }).should.throw(/Parse error on line 1/);
+ (function() {
+ ast_for("{{foo &}}");
+ }).should.throw(/Parse error on line 1/);
+ (function() {
+ ast_for("{{#goodbyes}}{{/hellos}}");
+ }).should.throw(/goodbyes doesn't match hellos/);
+ });
+
+ it('knows how to report the correct line number in errors', function() {
+ (function() {
+ ast_for("hello\nmy\n{{foo}");
+ }).should.throw(/Parse error on line 3/);
+ (function() {
+ ast_for("hello\n\nmy\n\n{{foo}");
+ }).should.throw(/Parse error on line 5/);
+ });
+
+ it('knows how to report the correct line number in errors when the first character is a newline', function() {
+ (function() {
+ ast_for("\n\nhello\n\nmy\n\n{{foo}");
+ }).should.throw(/Parse error on line 7/);
+ });
+
+ describe('externally compiled AST', function() {
+ it('can pass through an already-compiled AST', function() {
+ ast_for(new Handlebars.AST.ProgramNode([ new Handlebars.AST.ContentNode("Hello")])).should.equal("CONTENT[ 'Hello' ]\n");
+ });
+ });
+});
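
The ast_for helper above is just Handlebars.parse piped through Handlebars.print. For orientation, a minimal sketch of the same round trip outside the test harness (run from the repo root; it assumes lib/handlebars exports parse and print, as the spec's require shows, and the expected output is taken from the hash-arguments test above):

    var Handlebars = require('./lib/handlebars');

    // Parse a template to an AST, then pretty-print it in the
    // notation the specs assert against.
    var ast = Handlebars.parse('{{foo bar=1}}');
    console.log(Handlebars.print(ast));
    // prints: {{ ID:foo [] HASH{bar=INTEGER{1}} }}
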
diff --git a/spec/parser_spec.rb b/spec/parser_spec.rb
deleted file mode 100644
index b25e889..0000000
--- a/spec/parser_spec.rb
+++ /dev/null
@@ -1,426 +0,0 @@
-require "spec_helper"
-
-describe "Parser" do
- let(:handlebars) { @context["Handlebars"] }
-
- before(:all) do
- @compiles = true
- end
-
- def root(&block)
- ASTBuilder.build do
- instance_eval(&block)
- end
- end
-
- def ast_for(string)
- ast = handlebars.parse(string)
- handlebars.print(ast)
- end
-
- class ASTBuilder
- def self.build(&block)
- ret = new
- ret.evaluate(&block)
- ret.out
- end
-
- attr_reader :out
-
- def initialize
- @padding = 0
- @out = ""
- end
-
- def evaluate(&block)
- instance_eval(&block)
- end
-
- def pad(string)
- @out << (" " * @padding) + string + "\n"
- end
-
- def with_padding
- @padding += 1
- ret = yield
- @padding -= 1
- ret
- end
-
- def program
- pad("PROGRAM:")
- with_padding { yield }
- end
-
- def inverse
- pad("{{^}}")
- with_padding { yield }
- end
-
- def block
- pad("BLOCK:")
- with_padding { yield }
- end
-
- def inverted_block
- pad("INVERSE:")
- with_padding { yield }
- end
-
- def mustache(id, params = [], hash = nil)
- hash = " #{hash}" if hash
- pad("{{ #{id} [#{params.join(", ")}]#{hash} }}")
- end
-
- def partial(id, context = nil)
- content = id.dup
- content << " #{context}" if context
- pad("{{> #{content} }}")
- end
-
- def comment(comment)
- pad("{{! '#{comment}' }}")
- end
-
- def multiline_comment(comment)
- pad("{{! '\n#{comment}\n' }}")
- end
-
- def content(string)
- pad("CONTENT[ '#{string}' ]")
- end
-
- def string(string)
- string.inspect
- end
-
- def integer(string)
- "INTEGER{#{string}}"
- end
-
- def boolean(string)
- "BOOLEAN{#{string}}"
- end
-
- def hash(*pairs)
- "HASH{" + pairs.map {|k,v| "#{k}=#{v}" }.join(", ") + "}"
- end
-
- def id(id)
- "ID:#{id}"
- end
-
- def data(id)
- "@ID:#{id}"
- end
-
- def partial_name(name)
- "PARTIAL:#{name}"
- end
-
- def path(*parts)
- "PATH:#{parts.join("/")}"
- end
- end
-
- it "parses simple mustaches" do
- ast_for("{{foo}}").should == root { mustache id("foo") }
- ast_for("{{foo?}}").should == root { mustache id("foo?") }
- ast_for("{{foo_}}").should == root { mustache id("foo_") }
- ast_for("{{foo-}}").should == root { mustache id("foo-") }
- ast_for("{{foo:}}").should == root { mustache id("foo:") }
- end
-
- it "parses simple mustaches with data" do
- ast_for("{{@foo}}").should == root { mustache data("foo") }
- end
-
- it "parses mustaches with paths" do
- ast_for("{{foo/bar}}").should == root { mustache path("foo", "bar") }
- end
-
- it "parses mustaches with this/foo" do
- ast_for("{{this/foo}}").should == root { mustache id("foo") }
- end
-
- it "parses mustaches with - in a path" do
- ast_for("{{foo-bar}}").should == root { mustache id("foo-bar") }
- end
-
- it "parses mustaches with parameters" do
- ast_for("{{foo bar}}").should == root { mustache id("foo"), [id("bar")] }
- end
-
- it "parses mustaches with hash arguments" do
- ast_for("{{foo bar=baz}}").should == root do
- mustache id("foo"), [], hash(["bar", id("baz")])
- end
-
- ast_for("{{foo bar=1}}").should == root do
- mustache id("foo"), [], hash(["bar", integer("1")])
- end
-
- ast_for("{{foo bar=true}}").should == root do
- mustache id("foo"), [], hash(["bar", boolean("true")])
- end
-
- ast_for("{{foo bar=false}}").should == root do
- mustache id("foo"), [], hash(["bar", boolean("false")])
- end
-
- ast_for("{{foo bar=@baz}}").should == root do
- mustache id("foo"), [], hash(["bar", data("baz")])
- end
-
- ast_for("{{foo bar=baz bat=bam}}").should == root do
- mustache id("foo"), [], hash(["bar", "ID:baz"], ["bat", "ID:bam"])
- end
-
- ast_for("{{foo bar=baz bat=\"bam\"}}").should == root do
- mustache id("foo"), [], hash(["bar", "ID:baz"], ["bat", "\"bam\""])
- end
-
- ast_for("{{foo bat='bam'}}").should == root do
- mustache id("foo"), [], hash(["bat", "\"bam\""])
- end
-
- ast_for("{{foo omg bar=baz bat=\"bam\"}}").should == root do
- mustache id("foo"), [id("omg")], hash(["bar", id("baz")], ["bat", string("bam")])
- end
-
- ast_for("{{foo omg bar=baz bat=\"bam\" baz=1}}").should == root do
- mustache id("foo"), [id("omg")], hash(["bar", id("baz")], ["bat", string("bam")], ["baz", integer("1")])
- end
-
- ast_for("{{foo omg bar=baz bat=\"bam\" baz=true}}").should == root do
- mustache id("foo"), [id("omg")], hash(["bar", id("baz")], ["bat", string("bam")], ["baz", boolean("true")])
- end
-
- ast_for("{{foo omg bar=baz bat=\"bam\" baz=false}}").should == root do
- mustache id("foo"), [id("omg")], hash(["bar", id("baz")], ["bat", string("bam")], ["baz", boolean("false")])
- end
- end
-
- it "parses mustaches with string parameters" do
- ast_for("{{foo bar \"baz\" }}").should == root { mustache id("foo"), [id("bar"), string("baz")] }
- end
-
- it "parses mustaches with INTEGER parameters" do
- ast_for("{{foo 1}}").should == root { mustache id("foo"), [integer("1")] }
- end
-
- it "parses mustaches with BOOLEAN parameters" do
- ast_for("{{foo true}}").should == root { mustache id("foo"), [boolean("true")] }
- ast_for("{{foo false}}").should == root { mustache id("foo"), [boolean("false")] }
- end
-
- it "parses mutaches with DATA parameters" do
- ast_for("{{foo @bar}}").should == root { mustache id("foo"), [data("bar")] }
- end
-
- it "parses contents followed by a mustache" do
- ast_for("foo bar {{baz}}").should == root do
- content "foo bar "
- mustache id("baz")
- end
- end
-
- it "parses a partial" do
- ast_for("{{> foo }}").should == root { partial partial_name("foo") }
- end
-
- it "parses a partial with context" do
- ast_for("{{> foo bar}}").should == root { partial partial_name("foo"), id("bar") }
- end
-
- it "parses a partial with a complex name" do
- ast_for("{{> shared/partial?.bar}}").should == root { partial partial_name("shared/partial?.bar") }
- end
-
- it "parses a comment" do
- ast_for("{{! this is a comment }}").should == root do
- comment " this is a comment "
- end
- end
-
- it "parses a multi-line comment" do
- ast_for("{{!\nthis is a multi-line comment\n}}").should == root do
- multiline_comment "this is a multi-line comment"
- end
- end
-
- it "parses an inverse section" do
- ast_for("{{#foo}} bar {{^}} baz {{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- content " bar "
- end
-
- inverse do
- content " baz "
- end
- end
- end
- end
-
- it "parses an inverse ('else'-style) section" do
- ast_for("{{#foo}} bar {{else}} baz {{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- content " bar "
- end
-
- inverse do
- content " baz "
- end
- end
- end
- end
-
- it "parses empty blocks" do
- ast_for("{{#foo}}{{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- # empty program
- end
- end
- end
- end
-
- it "parses empty blocks with empty inverse section" do
- ast_for("{{#foo}}{{^}}{{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- # empty program
- end
-
- inverse do
- # empty inverse
- end
- end
- end
- end
-
- it "parses empty blocks with empty inverse ('else'-style) section" do
- ast_for("{{#foo}}{{else}}{{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- # empty program
- end
-
- inverse do
- # empty inverse
- end
- end
- end
- end
-
- it "parses non-empty blocks with empty inverse section" do
- ast_for("{{#foo}} bar {{^}}{{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- content " bar "
- end
-
- inverse do
- # empty inverse
- end
- end
- end
- end
-
- it "parses non-empty blocks with empty inverse ('else'-style) section" do
- ast_for("{{#foo}} bar {{else}}{{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- content " bar "
- end
-
- inverse do
- # empty inverse
- end
- end
- end
- end
-
- it "parses empty blocks with non-empty inverse section" do
- ast_for("{{#foo}}{{^}} bar {{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- # empty program
- end
-
- inverse do
- content " bar "
- end
- end
- end
- end
-
- it "parses empty blocks with non-empty inverse ('else'-style) section" do
- ast_for("{{#foo}}{{else}} bar {{/foo}}").should == root do
- block do
- mustache id("foo")
-
- program do
- # empty program
- end
-
- inverse do
- content " bar "
- end
- end
- end
- end
-
- it "parses a standalone inverse section" do
- ast_for("{{^foo}}bar{{/foo}}").should == root do
- block do
- mustache id("foo")
-
- inverse do
- content "bar"
- end
- end
- end
- end
-
- it "raises if there's a Parse error" do
- lambda { ast_for("{{foo}") }.should raise_error(V8::JSError, /Parse error on line 1/)
- lambda { ast_for("{{foo &}}")}.should raise_error(V8::JSError, /Parse error on line 1/)
- lambda { ast_for("{{#goodbyes}}{{/hellos}}") }.should raise_error(V8::JSError, /goodbyes doesn't match hellos/)
- end
-
- it "knows how to report the correct line number in errors" do
- lambda { ast_for("hello\nmy\n{{foo}") }.should raise_error(V8::JSError, /Parse error on line 3/m)
- lambda { ast_for("hello\n\nmy\n\n{{foo}") }.should raise_error(V8::JSError, /Parse error on line 5/m)
- end
-
- it "knows how to report the correct line number in errors when the first character is a newline" do
- lambda { ast_for("\n\nhello\n\nmy\n\n{{foo}") }.should raise_error(V8::JSError, /Parse error on line 7/m)
- end
-
- context "externally compiled AST" do
- it "can pass through an already-compiled AST" do
- ast_for(@context.eval('new Handlebars.AST.ProgramNode([ new Handlebars.AST.ContentNode("Hello")]);')).should == root do
- content "Hello"
- end
- end
- end
-end
diff --git a/spec/tokenizer.js b/spec/tokenizer.js
new file mode 100644
index 0000000..c69f406
--- /dev/null
+++ b/spec/tokenizer.js
@@ -0,0 +1,317 @@
+var Handlebars = require('../lib/handlebars'),
+ should = require('should');
+
+should.Assertion.prototype.match_tokens = function(tokens) {
+ this.obj.forEach(function(value, index) {
+ value.name.should.equal(tokens[index]);
+ });
+};
+should.Assertion.prototype.be_token = function(name, text) {
+ this.obj.should.eql({name: name, text: text});
+};
+
+describe('Tokenizer', function() {
+ function tokenize(template) {
+ var parser = Handlebars.Parser,
+ lexer = parser.lexer;
+
+ lexer.setInput(template);
+ var out = [],
+ token;
+
+ while ((token = lexer.lex())) {
+ var result = parser.terminals_[token] || token;
+ if (!result || result === 'EOF' || result === 'INVALID') {
+ break;
+ }
+ out.push({name: result, text: lexer.yytext});
+ }
+
+ return out;
+ }
+
+ it('tokenizes a simple mustache as "OPEN ID CLOSE"', function() {
+ var result = tokenize("{{foo}}");
+ result.should.match_tokens(['OPEN', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "foo");
+ });
+
+ it('supports unescaping with &', function() {
+ var result = tokenize("{{&bar}}");
+ result.should.match_tokens(['OPEN', 'ID', 'CLOSE']);
+
+ result[0].should.be_token("OPEN", "{{&");
+ result[1].should.be_token("ID", "bar");
+ });
+
+ it('supports unescaping with {{{', function() {
+ var result = tokenize("{{{bar}}}");
+ result.should.match_tokens(['OPEN_UNESCAPED', 'ID', 'CLOSE_UNESCAPED']);
+
+ result[1].should.be_token("ID", "bar");
+ });
+
+ it('supports escaping delimiters', function() {
+ var result = tokenize("{{foo}} \\{{bar}} {{baz}}");
+ result.should.match_tokens(['OPEN', 'ID', 'CLOSE', 'CONTENT', 'CONTENT', 'OPEN', 'ID', 'CLOSE']);
+
+ result[4].should.be_token("CONTENT", "{{bar}} ");
+ });
+
+ it('supports escaping multiple delimiters', function() {
+ var result = tokenize("{{foo}} \\{{bar}} \\{{baz}}");
+ result.should.match_tokens(['OPEN', 'ID', 'CLOSE', 'CONTENT', 'CONTENT', 'CONTENT']);
+
+ result[3].should.be_token("CONTENT", " ");
+ result[4].should.be_token("CONTENT", "{{bar}} ");
+ result[5].should.be_token("CONTENT", "{{baz}}");
+ });
+
+ it('supports escaping a triple stash', function() {
+ var result = tokenize("{{foo}} \\{{{bar}}} {{baz}}");
+ result.should.match_tokens(['OPEN', 'ID', 'CLOSE', 'CONTENT', 'CONTENT', 'OPEN', 'ID', 'CLOSE']);
+
+ result[4].should.be_token("CONTENT", "{{{bar}}} ");
+ });
+
+ it('tokenizes a simple path', function() {
+ var result = tokenize("{{foo/bar}}");
+ result.should.match_tokens(['OPEN', 'ID', 'SEP', 'ID', 'CLOSE']);
+ });
+
+ it('allows dot notation', function() {
+ var result = tokenize("{{foo.bar}}");
+ result.should.match_tokens(['OPEN', 'ID', 'SEP', 'ID', 'CLOSE']);
+
+ tokenize("{{foo.bar.baz}}").should.match_tokens(['OPEN', 'ID', 'SEP', 'ID', 'SEP', 'ID', 'CLOSE']);
+ });
+
+ it('allows path literals with []', function() {
+ var result = tokenize("{{foo.[bar]}}");
+ result.should.match_tokens(['OPEN', 'ID', 'SEP', 'ID', 'CLOSE']);
+ });
+
+ it('allows multiple path literals on a line with []', function() {
+ var result = tokenize("{{foo.[bar]}}{{foo.[baz]}}");
+ result.should.match_tokens(['OPEN', 'ID', 'SEP', 'ID', 'CLOSE', 'OPEN', 'ID', 'SEP', 'ID', 'CLOSE']);
+ });
+
+ it('tokenizes {{.}} as OPEN ID CLOSE', function() {
+ var result = tokenize("{{.}}");
+ result.should.match_tokens(['OPEN', 'ID', 'CLOSE']);
+ });
+
+ it('tokenizes a path as "OPEN (ID SEP)* ID CLOSE"', function() {
+ var result = tokenize("{{../foo/bar}}");
+ result.should.match_tokens(['OPEN', 'ID', 'SEP', 'ID', 'SEP', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "..");
+ });
+
+ it('tokenizes a path with .. as a parent path', function() {
+ var result = tokenize("{{../foo.bar}}");
+ result.should.match_tokens(['OPEN', 'ID', 'SEP', 'ID', 'SEP', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "..");
+ });
+
+ it('tokenizes a path with this/foo as OPEN ID SEP ID CLOSE', function() {
+ var result = tokenize("{{this/foo}}");
+ result.should.match_tokens(['OPEN', 'ID', 'SEP', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "this");
+ result[3].should.be_token("ID", "foo");
+ });
+
+ it('tokenizes a simple mustache with spaces as "OPEN ID CLOSE"', function() {
+ var result = tokenize("{{ foo }}");
+ result.should.match_tokens(['OPEN', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "foo");
+ });
+
+ it('tokenizes a simple mustache with line breaks as "OPEN ID ID CLOSE"', function() {
+ var result = tokenize("{{ foo \n bar }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "foo");
+ });
+
+ it('tokenizes raw content as "CONTENT"', function() {
+ var result = tokenize("foo {{ bar }} baz");
+ result.should.match_tokens(['CONTENT', 'OPEN', 'ID', 'CLOSE', 'CONTENT']);
+ result[0].should.be_token("CONTENT", "foo ");
+ result[4].should.be_token("CONTENT", " baz");
+ });
+
+ it('tokenizes a partial as "OPEN_PARTIAL ID CLOSE"', function() {
+ var result = tokenize("{{> foo}}");
+ result.should.match_tokens(['OPEN_PARTIAL', 'ID', 'CLOSE']);
+ });
+
+ it('tokenizes a partial with context as "OPEN_PARTIAL ID ID CLOSE"', function() {
+ var result = tokenize("{{> foo bar }}");
+ result.should.match_tokens(['OPEN_PARTIAL', 'ID', 'ID', 'CLOSE']);
+ });
+
+ it('tokenizes a partial without spaces as "OPEN_PARTIAL ID CLOSE"', function() {
+ var result = tokenize("{{>foo}}");
+ result.should.match_tokens(['OPEN_PARTIAL', 'ID', 'CLOSE']);
+ });
+
+ it('tokenizes a partial with a trailing space as "OPEN_PARTIAL ID CLOSE"', function() {
+ var result = tokenize("{{>foo }}");
+ result.should.match_tokens(['OPEN_PARTIAL', 'ID', 'CLOSE']);
+ });
+
+ it('tokenizes a partial with a complex name as "OPEN_PARTIAL ID SEP ID SEP ID CLOSE"', function() {
+ var result = tokenize("{{>foo/bar.baz }}");
+ result.should.match_tokens(['OPEN_PARTIAL', 'ID', 'SEP', 'ID', 'SEP', 'ID', 'CLOSE']);
+ });
+
+ it('tokenizes a comment as "COMMENT"', function() {
+ var result = tokenize("foo {{! this is a comment }} bar {{ baz }}");
+ result.should.match_tokens(['CONTENT', 'COMMENT', 'CONTENT', 'OPEN', 'ID', 'CLOSE']);
+ result[1].should.be_token("COMMENT", " this is a comment ");
+ });
+
+ it('tokenizes a block comment as "COMMENT"', function() {
+ var result = tokenize("foo {{!-- this is a {{comment}} --}} bar {{ baz }}");
+ result.should.match_tokens(['CONTENT', 'COMMENT', 'CONTENT', 'OPEN', 'ID', 'CLOSE']);
+ result[1].should.be_token("COMMENT", " this is a {{comment}} ");
+ });
+
+ it('tokenizes a block comment with whitespace as "COMMENT"', function() {
+ var result = tokenize("foo {{!-- this is a\n{{comment}}\n--}} bar {{ baz }}");
+ result.should.match_tokens(['CONTENT', 'COMMENT', 'CONTENT', 'OPEN', 'ID', 'CLOSE']);
+ result[1].should.be_token("COMMENT", " this is a\n{{comment}}\n");
+ });
+
+ it('tokenizes open and closing blocks as OPEN_BLOCK, ID, CLOSE ..., OPEN_ENDBLOCK ID CLOSE', function() {
+ var result = tokenize("{{#foo}}content{{/foo}}");
+ result.should.match_tokens(['OPEN_BLOCK', 'ID', 'CLOSE', 'CONTENT', 'OPEN_ENDBLOCK', 'ID', 'CLOSE']);
+ });
+
+ it('tokenizes inverse sections as "OPEN_INVERSE CLOSE"', function() {
+ tokenize("{{^}}").should.match_tokens(['OPEN_INVERSE', 'CLOSE']);
+ tokenize("{{else}}").should.match_tokens(['OPEN_INVERSE', 'CLOSE']);
+ tokenize("{{ else }}").should.match_tokens(['OPEN_INVERSE', 'CLOSE']);
+ });
+
+ it('tokenizes inverse sections with ID as "OPEN_INVERSE ID CLOSE"', function() {
+ var result = tokenize("{{^foo}}");
+ result.should.match_tokens(['OPEN_INVERSE', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "foo");
+ });
+
+ it('tokenizes inverse sections with ID and spaces as "OPEN_INVERSE ID CLOSE"', function() {
+ var result = tokenize("{{^ foo }}");
+ result.should.match_tokens(['OPEN_INVERSE', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "foo");
+ });
+
+ it('tokenizes mustaches with params as "OPEN ID ID ID CLOSE"', function() {
+ var result = tokenize("{{ foo bar baz }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'CLOSE']);
+ result[1].should.be_token("ID", "foo");
+ result[2].should.be_token("ID", "bar");
+ result[3].should.be_token("ID", "baz");
+ });
+
+ it('tokenizes mustaches with String params as "OPEN ID ID STRING CLOSE"', function() {
+ var result = tokenize("{{ foo bar \"baz\" }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'STRING', 'CLOSE']);
+ result[3].should.be_token("STRING", "baz");
+ });
+
+ it('tokenizes mustaches with String params using single quotes as "OPEN ID ID STRING CLOSE"', function() {
+ var result = tokenize("{{ foo bar \'baz\' }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'STRING', 'CLOSE']);
+ result[3].should.be_token("STRING", "baz");
+ });
+
+ it('tokenizes String params with spaces inside as "STRING"', function() {
+ var result = tokenize("{{ foo bar \"baz bat\" }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'STRING', 'CLOSE']);
+ result[3].should.be_token("STRING", "baz bat");
+ });
+
+ it('tokenizes String params with escaped quotes as STRING', function() {
+ var result = tokenize('{{ foo "bar\\"baz" }}');
+ result.should.match_tokens(['OPEN', 'ID', 'STRING', 'CLOSE']);
+ result[2].should.be_token("STRING", 'bar"baz');
+ });
+
+ it('tokenizes String params using single quotes with escaped quotes as STRING', function() {
+ var result = tokenize("{{ foo 'bar\\'baz' }}");
+ result.should.match_tokens(['OPEN', 'ID', 'STRING', 'CLOSE']);
+ result[2].should.be_token("STRING", "bar'baz");
+ });
+
+ it('tokenizes numbers', function() {
+ var result = tokenize('{{ foo 1 }}');
+ result.should.match_tokens(['OPEN', 'ID', 'INTEGER', 'CLOSE']);
+ result[2].should.be_token("INTEGER", "1");
+
+ result = tokenize('{{ foo -1 }}');
+ result.should.match_tokens(['OPEN', 'ID', 'INTEGER', 'CLOSE']);
+ result[2].should.be_token("INTEGER", "-1");
+ });
+
+ it('tokenizes booleans', function() {
+ var result = tokenize('{{ foo true }}');
+ result.should.match_tokens(['OPEN', 'ID', 'BOOLEAN', 'CLOSE']);
+ result[2].should.be_token("BOOLEAN", "true");
+
+ result = tokenize('{{ foo false }}');
+ result.should.match_tokens(['OPEN', 'ID', 'BOOLEAN', 'CLOSE']);
+ result[2].should.be_token("BOOLEAN", "false");
+ });
+
+ it('tokenizes hash arguments', function() {
+ var result = tokenize("{{ foo bar=baz }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'EQUALS', 'ID', 'CLOSE']);
+
+ result = tokenize("{{ foo bar baz=bat }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'EQUALS', 'ID', 'CLOSE']);
+
+ result = tokenize("{{ foo bar baz=1 }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'EQUALS', 'INTEGER', 'CLOSE']);
+
+ result = tokenize("{{ foo bar baz=true }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'EQUALS', 'BOOLEAN', 'CLOSE']);
+
+ result = tokenize("{{ foo bar baz=false }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'EQUALS', 'BOOLEAN', 'CLOSE']);
+
+ result = tokenize("{{ foo bar\n baz=bat }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'EQUALS', 'ID', 'CLOSE']);
+
+ result = tokenize("{{ foo bar baz=\"bat\" }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'EQUALS', 'STRING', 'CLOSE']);
+
+ result = tokenize("{{ foo bar baz=\"bat\" bam=wot }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'EQUALS', 'STRING', 'ID', 'EQUALS', 'ID', 'CLOSE']);
+
+ result = tokenize("{{foo omg bar=baz bat=\"bam\"}}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'ID', 'EQUALS', 'ID', 'ID', 'EQUALS', 'STRING', 'CLOSE']);
+ result[2].should.be_token("ID", "omg");
+ });
+
+ it('tokenizes special @ identifiers', function() {
+ var result = tokenize("{{ @foo }}");
+ result.should.match_tokens(['OPEN', 'DATA', 'ID', 'CLOSE']);
+ result[2].should.be_token("ID", "foo");
+
+ result = tokenize("{{ foo @bar }}");
+ result.should.match_tokens(['OPEN', 'ID', 'DATA', 'ID', 'CLOSE']);
+ result[3].should.be_token("ID", "bar");
+
+ result = tokenize("{{ foo bar=@baz }}");
+ result.should.match_tokens(['OPEN', 'ID', 'ID', 'EQUALS', 'DATA', 'ID', 'CLOSE']);
+ result[5].should.be_token("ID", "baz");
+ });
+
+ it('does not time out in a mustache with a single } followed by EOF', function() {
+ tokenize("{{foo}").should.match_tokens(['OPEN', 'ID']);
+ });
+
+ it('does not time out in a mustache when invalid ID characters are used', function() {
+ tokenize("{{foo & }}").should.match_tokens(['OPEN', 'ID']);
+ });
+});
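
The tokenize helper above drives the generated Jison lexer by hand: setInput seeds it, lex() returns numeric token ids, and parser.terminals_ maps those ids back to token names; the custom match_tokens/be_token assertions then compare names and yytext. A standalone sketch of the same loop (assumes Handlebars.Parser is the Jison-generated parser exposed by lib/handlebars, exactly as the spec uses it):

    var Handlebars = require('./lib/handlebars');

    var parser = Handlebars.Parser,
        lexer = parser.lexer;

    lexer.setInput('{{foo}}');
    var token;
    while ((token = lexer.lex())) {
      // Map the numeric token id back to its terminal name.
      var name = parser.terminals_[token] || token;
      if (!name || name === 'EOF' || name === 'INVALID') break;
      console.log(name, JSON.stringify(lexer.yytext));
    }
    // prints, one token per line:
    //   OPEN "{{"
    //   ID "foo"
    //   CLOSE "}}"
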
diff --git a/spec/tokenizer_spec.rb b/spec/tokenizer_spec.rb
deleted file mode 100644
index 0a7c3f9..0000000
--- a/spec/tokenizer_spec.rb
+++ /dev/null
@@ -1,322 +0,0 @@
-require "spec_helper"
-require "timeout"
-
-describe "Tokenizer" do
- let(:parser) { Handlebars::Spec::PARSER_CONTEXT["handlebars"] }
- let(:lexer) { Handlebars::Spec::PARSER_CONTEXT["handlebars"]["lexer"] }
-
- Token = Struct.new(:name, :text)
-
- def tokenize(string)
- lexer.setInput(string)
- out = []
-
- while token = lexer.lex
- # p token
- result = parser.terminals_[token] || token
- # p result
- break if !result || result == "EOF" || result == "INVALID"
- out << Token.new(result, lexer.yytext)
- end
-
- out
- end
-
- RSpec::Matchers.define :match_tokens do |tokens|
- match do |result|
- result.map(&:name).should == tokens
- end
- end
-
- RSpec::Matchers.define :be_token do |name, string|
- match do |token|
- token.name.should == name
- token.text.should == string
- end
- end
-
- it "tokenizes a simple mustache as 'OPEN ID CLOSE'" do
- result = tokenize("{{foo}}")
- result.should match_tokens(%w(OPEN ID CLOSE))
- result[1].should be_token("ID", "foo")
- end
-
- it "supports unescaping with &" do
- result = tokenize("{{&bar}}")
- result.should match_tokens(%w(OPEN ID CLOSE))
-
- result[0].should be_token("OPEN", "{{&")
- result[1].should be_token("ID", "bar")
- end
-
- it "supports unescaping with {{{" do
- result = tokenize("{{{bar}}}")
- result.should match_tokens(%w(OPEN_UNESCAPED ID CLOSE_UNESCAPED))
-
- result[1].should be_token("ID", "bar")
- end
-
- it "supports escaping delimiters" do
- result = tokenize("{{foo}} \\{{bar}} {{baz}}")
- result.should match_tokens(%w(OPEN ID CLOSE CONTENT CONTENT OPEN ID CLOSE))
-
- result[4].should be_token("CONTENT", "{{bar}} ")
- end
-
- it "supports escaping multiple delimiters" do
- result = tokenize("{{foo}} \\{{bar}} \\{{baz}}")
- result.should match_tokens(%w(OPEN ID CLOSE CONTENT CONTENT CONTENT))
-
- result[3].should be_token("CONTENT", " ")
- result[4].should be_token("CONTENT", "{{bar}} ")
- result[5].should be_token("CONTENT", "{{baz}}")
- end
-
- it "supports escaping a triple stash" do
- result = tokenize("{{foo}} \\{{{bar}}} {{baz}}")
- result.should match_tokens(%w(OPEN ID CLOSE CONTENT CONTENT OPEN ID CLOSE))
-
- result[4].should be_token("CONTENT", "{{{bar}}} ")
- end
-
- it "tokenizes a simple path" do
- result = tokenize("{{foo/bar}}")
- result.should match_tokens(%w(OPEN ID SEP ID CLOSE))
- end
-
- it "allows dot notation" do
- result = tokenize("{{foo.bar}}")
- result.should match_tokens(%w(OPEN ID SEP ID CLOSE))
-
- tokenize("{{foo.bar.baz}}").should match_tokens(%w(OPEN ID SEP ID SEP ID CLOSE))
- end
-
- it "allows path literals with []" do
- result = tokenize("{{foo.[bar]}}")
- result.should match_tokens(%w(OPEN ID SEP ID CLOSE))
- end
-
- it "allows multiple path literals on a line with []" do
- result = tokenize("{{foo.[bar]}}{{foo.[baz]}}")
- result.should match_tokens(%w(OPEN ID SEP ID CLOSE OPEN ID SEP ID CLOSE))
- end
-
- it "tokenizes {{.}} as OPEN ID CLOSE" do
- result = tokenize("{{.}}")
- result.should match_tokens(%w(OPEN ID CLOSE))
- end
-
- it "tokenizes a path as 'OPEN (ID SEP)* ID CLOSE'" do
- result = tokenize("{{../foo/bar}}")
- result.should match_tokens(%w(OPEN ID SEP ID SEP ID CLOSE))
- result[1].should be_token("ID", "..")
- end
-
- it "tokenizes a path with .. as a parent path" do
- result = tokenize("{{../foo.bar}}")
- result.should match_tokens(%w(OPEN ID SEP ID SEP ID CLOSE))
- result[1].should be_token("ID", "..")
- end
-
- it "tokenizes a path with this/foo as OPEN ID SEP ID CLOSE" do
- result = tokenize("{{this/foo}}")
- result.should match_tokens(%w(OPEN ID SEP ID CLOSE))
- result[1].should be_token("ID", "this")
- result[3].should be_token("ID", "foo")
- end
-
- it "tokenizes a simple mustache with spaces as 'OPEN ID CLOSE'" do
- result = tokenize("{{ foo }}")
- result.should match_tokens(%w(OPEN ID CLOSE))
- result[1].should be_token("ID", "foo")
- end
-
- it "tokenizes a simple mustache with line breaks as 'OPEN ID ID CLOSE'" do
- result = tokenize("{{ foo \n bar }}")
- result.should match_tokens(%w(OPEN ID ID CLOSE))
- result[1].should be_token("ID", "foo")
- end
-
- it "tokenizes raw content as 'CONTENT'" do
- result = tokenize("foo {{ bar }} baz")
- result.should match_tokens(%w(CONTENT OPEN ID CLOSE CONTENT))
- result[0].should be_token("CONTENT", "foo ")
- result[4].should be_token("CONTENT", " baz")
- end
-
- it "tokenizes a partial as 'OPEN_PARTIAL ID CLOSE'" do
- result = tokenize("{{> foo}}")
- result.should match_tokens(%w(OPEN_PARTIAL ID CLOSE))
- end
-
- it "tokenizes a partial with context as 'OPEN_PARTIAL ID ID CLOSE'" do
- result = tokenize("{{> foo bar }}")
- result.should match_tokens(%w(OPEN_PARTIAL ID ID CLOSE))
- end
-
- it "tokenizes a partial without spaces as 'OPEN_PARTIAL ID CLOSE'" do
- result = tokenize("{{>foo}}")
- result.should match_tokens(%w(OPEN_PARTIAL ID CLOSE))
- end
-
- it "tokenizes a partial space at the end as 'OPEN_PARTIAL ID CLOSE'" do
- result = tokenize("{{>foo }}")
- result.should match_tokens(%w(OPEN_PARTIAL ID CLOSE))
- end
-
- it "tokenizes a partial space at the end as 'OPEN_PARTIAL ID CLOSE'" do
- result = tokenize("{{>foo/bar.baz }}")
- result.should match_tokens(%w(OPEN_PARTIAL ID SEP ID SEP ID CLOSE))
- end
-
- it "tokenizes a comment as 'COMMENT'" do
- result = tokenize("foo {{! this is a comment }} bar {{ baz }}")
- result.should match_tokens(%w(CONTENT COMMENT CONTENT OPEN ID CLOSE))
- result[1].should be_token("COMMENT", " this is a comment ")
- end
-
- it "tokenizes a block comment as 'COMMENT'" do
- result = tokenize("foo {{!-- this is a {{comment}} --}} bar {{ baz }}")
- result.should match_tokens(%w(CONTENT COMMENT CONTENT OPEN ID CLOSE))
- result[1].should be_token("COMMENT", " this is a {{comment}} ")
- end
-
- it "tokenizes a block comment with whitespace as 'COMMENT'" do
- result = tokenize("foo {{!-- this is a\n{{comment}}\n--}} bar {{ baz }}")
- result.should match_tokens(%w(CONTENT COMMENT CONTENT OPEN ID CLOSE))
- result[1].should be_token("COMMENT", " this is a\n{{comment}}\n")
- end
-
- it "tokenizes open and closing blocks as 'OPEN_BLOCK ID CLOSE ... OPEN_ENDBLOCK ID CLOSE'" do
- result = tokenize("{{#foo}}content{{/foo}}")
- result.should match_tokens(%w(OPEN_BLOCK ID CLOSE CONTENT OPEN_ENDBLOCK ID CLOSE))
- end
-
- it "tokenizes inverse sections as 'OPEN_INVERSE CLOSE'" do
- tokenize("{{^}}").should match_tokens(%w(OPEN_INVERSE CLOSE))
- tokenize("{{else}}").should match_tokens(%w(OPEN_INVERSE CLOSE))
- tokenize("{{ else }}").should match_tokens(%w(OPEN_INVERSE CLOSE))
- end
-
- it "tokenizes inverse sections with ID as 'OPEN_INVERSE ID CLOSE'" do
- result = tokenize("{{^foo}}")
- result.should match_tokens(%w(OPEN_INVERSE ID CLOSE))
- result[1].should be_token("ID", "foo")
- end
-
- it "tokenizes inverse sections with ID and spaces as 'OPEN_INVERSE ID CLOSE'" do
- result = tokenize("{{^ foo }}")
- result.should match_tokens(%w(OPEN_INVERSE ID CLOSE))
- result[1].should be_token("ID", "foo")
- end
-
- it "tokenizes mustaches with params as 'OPEN ID ID ID CLOSE'" do
- result = tokenize("{{ foo bar baz }}")
- result.should match_tokens(%w(OPEN ID ID ID CLOSE))
- result[1].should be_token("ID", "foo")
- result[2].should be_token("ID", "bar")
- result[3].should be_token("ID", "baz")
- end
-
- it "tokenizes mustaches with String params as 'OPEN ID ID STRING CLOSE'" do
- result = tokenize("{{ foo bar \"baz\" }}")
- result.should match_tokens(%w(OPEN ID ID STRING CLOSE))
- result[3].should be_token("STRING", "baz")
- end
-
- it "tokenizes mustaches with String params using single quotes as 'OPEN ID ID STRING CLOSE'" do
- result = tokenize("{{ foo bar \'baz\' }}")
- result.should match_tokens(%w(OPEN ID ID STRING CLOSE))
- result[3].should be_token("STRING", "baz")
- end
-
- it "tokenizes String params with spaces inside as 'STRING'" do
- result = tokenize("{{ foo bar \"baz bat\" }}")
- result.should match_tokens(%w(OPEN ID ID STRING CLOSE))
- result[3].should be_token("STRING", "baz bat")
- end
-
- it "tokenizes String params with escapes quotes as 'STRING'" do
- result = tokenize(%|{{ foo "bar\\"baz" }}|)
- result.should match_tokens(%w(OPEN ID STRING CLOSE))
- result[2].should be_token("STRING", %{bar"baz})
- end
-
- it "tokenizes String params using single quotes with escapes quotes as 'STRING'" do
- result = tokenize(%|{{ foo 'bar\\'baz' }}|)
- result.should match_tokens(%w(OPEN ID STRING CLOSE))
- result[2].should be_token("STRING", %{bar'baz})
- end
-
- it "tokenizes numbers" do
- result = tokenize(%|{{ foo 1 }}|)
- result.should match_tokens(%w(OPEN ID INTEGER CLOSE))
- result[2].should be_token("INTEGER", "1")
-
- result = tokenize(%|{{ foo -1 }}|)
- result.should match_tokens(%w(OPEN ID INTEGER CLOSE))
- result[2].should be_token("INTEGER", "-1")
- end
-
- it "tokenizes booleans" do
- result = tokenize(%|{{ foo true }}|)
- result.should match_tokens(%w(OPEN ID BOOLEAN CLOSE))
- result[2].should be_token("BOOLEAN", "true")
-
- result = tokenize(%|{{ foo false }}|)
- result.should match_tokens(%w(OPEN ID BOOLEAN CLOSE))
- result[2].should be_token("BOOLEAN", "false")
- end
-
- it "tokenizes hash arguments" do
- result = tokenize("{{ foo bar=baz }}")
- result.should match_tokens %w(OPEN ID ID EQUALS ID CLOSE)
-
- result = tokenize("{{ foo bar baz=bat }}")
- result.should match_tokens %w(OPEN ID ID ID EQUALS ID CLOSE)
-
- result = tokenize("{{ foo bar baz=1 }}")
- result.should match_tokens %w(OPEN ID ID ID EQUALS INTEGER CLOSE)
-
- result = tokenize("{{ foo bar baz=true }}")
- result.should match_tokens %w(OPEN ID ID ID EQUALS BOOLEAN CLOSE)
-
- result = tokenize("{{ foo bar baz=false }}")
- result.should match_tokens %w(OPEN ID ID ID EQUALS BOOLEAN CLOSE)
-
- result = tokenize("{{ foo bar\n baz=bat }}")
- result.should match_tokens %w(OPEN ID ID ID EQUALS ID CLOSE)
-
- result = tokenize("{{ foo bar baz=\"bat\" }}")
- result.should match_tokens %w(OPEN ID ID ID EQUALS STRING CLOSE)
-
- result = tokenize("{{ foo bar baz=\"bat\" bam=wot }}")
- result.should match_tokens %w(OPEN ID ID ID EQUALS STRING ID EQUALS ID CLOSE)
-
- result = tokenize("{{foo omg bar=baz bat=\"bam\"}}")
- result.should match_tokens %w(OPEN ID ID ID EQUALS ID ID EQUALS STRING CLOSE)
- result[2].should be_token("ID", "omg")
- end
-
- it "tokenizes special @ identifiers" do
- result = tokenize("{{ @foo }}")
- result.should match_tokens %w( OPEN DATA ID CLOSE )
- result[2].should be_token("ID", "foo")
-
- result = tokenize("{{ foo @bar }}")
- result.should match_tokens %w( OPEN ID DATA ID CLOSE )
- result[3].should be_token("ID", "bar")
-
- result = tokenize("{{ foo bar=@baz }}")
- result.should match_tokens %w( OPEN ID ID EQUALS DATA ID CLOSE )
- result[5].should be_token("ID", "baz")
- end
-
- it "does not time out in a mustache with a single } followed by EOF" do
- Timeout.timeout(1) { tokenize("{{foo}").should match_tokens(%w(OPEN ID)) }
- end
-
- it "does not time out in a mustache when invalid ID characters are used" do
- Timeout.timeout(1) { tokenize("{{foo & }}").should match_tokens(%w(OPEN ID)) }
- end
-end
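
One behavioral difference in the port: the deleted Ruby suite wrapped the two regression tests above in Timeout.timeout(1) to catch a lexer that loops forever, while the JS versions rely on mocha's default per-test timeout. If an explicit guard is wanted, mocha supports setting one per test — a sketch, not part of the commit:

    it('does not time out in a mustache with a single } followed by EOF', function() {
      this.timeout(1000); // fail after 1s instead of hanging the run
      tokenize('{{foo}').should.match_tokens(['OPEN', 'ID']);
    });
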