From 9448b963f93d65bd6ebb1eadf692a94abe482814 Mon Sep 17 00:00:00 2001
From: Rupinder Singh Khokhar
Date: Fri, 20 Jun 2014 05:32:21 +0530
Subject: [Headache] Fixed tester & modified test file :-P

---
 test/data/tokeniser2/INDEX                        |  2 +-
 test/data/tokeniser2/unicodeCharsProblematic.test | 18 +++++++++---------
 test/tokeniser2.c                                 |  7 +++----
 test/tokeniser3.c                                 |  6 +++---
 4 files changed, 16 insertions(+), 17 deletions(-)

diff --git a/test/data/tokeniser2/INDEX b/test/data/tokeniser2/INDEX
index 4b9e037..fe2b6e6 100644
--- a/test/data/tokeniser2/INDEX
+++ b/test/data/tokeniser2/INDEX
@@ -16,5 +16,5 @@ regression.test	Regression tests
 #domjs.test	NA
 namedEntities.test	html5lib named entities tests
 pendingSpecChanges.test	NA
-#unicodeCharsProblematic.test	NA
+unicodeCharsProblematic.test	html5lib unicode exceptions tests
 #xmlViolation.test	NA
diff --git a/test/data/tokeniser2/unicodeCharsProblematic.test b/test/data/tokeniser2/unicodeCharsProblematic.test
index 5987845..91b22f8 100644
--- a/test/data/tokeniser2/unicodeCharsProblematic.test
+++ b/test/data/tokeniser2/unicodeCharsProblematic.test
@@ -1,27 +1,27 @@
 {"tests" : [
 {"description": "Invalid Unicode character U+DFFF",
 "doubleEscaped":true,
-"input": "\\uDFFF",
-"output":["ParseError", ["Character", "\\uFFFD"]]},
+"input": "\uDFFF",
+"output":["ParseError", ["Character", "\uFFFD"]]},
 
 {"description": "Invalid Unicode character U+D800",
 "doubleEscaped":true,
-"input": "\\uD800",
-"output":["ParseError", ["Character", "\\uFFFD"]]},
+"input": "\uD800",
+"output":["ParseError", ["Character", "\uFFFD"]]},
 
 {"description": "Invalid Unicode character U+DFFF with valid preceding character",
 "doubleEscaped":true,
-"input": "a\\uDFFF",
-"output":[["Character", "a"], "ParseError", ["Character", "\\uFFFD"]]},
+"input": "a\uDFFF",
+"output":[["Character", "a"], "ParseError", ["Character", "\uFFFD"]]},
 
 {"description": "Invalid Unicode character U+D800 with valid following character",
 "doubleEscaped":true,
-"input": "\\uD800a",
-"output":["ParseError", ["Character", "\\uFFFDa"]]},
+"input": "\uD800a",
+"output":["ParseError", ["Character", "\uFFFDa"]]},
 
 {"description":"CR followed by U+0000",
 "input":"\r\u0000",
 "output":[["Character", "\n"], "ParseError", ["Character", "\u0000"]],
 "ignoreErrorOrder":true}
 ]
-}
\ No newline at end of file
+}
diff --git a/test/tokeniser2.c b/test/tokeniser2.c
index a0264c9..6415222 100644
--- a/test/tokeniser2.c
+++ b/test/tokeniser2.c
@@ -444,12 +444,11 @@ hubbub_error token_handler(const hubbub_token *token, void *pw)
 			hubbub_token t;
 
 			t.type = HUBBUB_TOKEN_CHARACTER;
-			t.data.character.ptr += len;
-			t.data.character.len -= len;
-
+			t.data.character.ptr = token->data.character.ptr + len;
+			t.data.character.len = token->data.character.len - len;
 			ctx->char_off = 0;
 
-			token_handler(&t, pw);
+			return token_handler(&t, pw);
 		} else if (strlen(expstr + ctx->char_off) >
 				token->data.character.len) {
 			/* Tokeniser output only contained part of the data
diff --git a/test/tokeniser3.c b/test/tokeniser3.c
index 86f079b..b514256 100644
--- a/test/tokeniser3.c
+++ b/test/tokeniser3.c
@@ -451,12 +451,12 @@ hubbub_error token_handler(const hubbub_token *token, void *pw)
 			hubbub_token t;
 
 			t.type = HUBBUB_TOKEN_CHARACTER;
-			t.data.character.ptr += len;
-			t.data.character.len -= len;
+			t.data.character.ptr = token->data.character.ptr + len;
+			t.data.character.len = token->data.character.len - len;
 			ctx->char_off = 0;
 
-			token_handler(&t, pw);
+			return token_handler(&t, pw);
 		} else if (strlen(expstr + ctx->char_off) >
 				token->data.character.len) {
 			/*
 			Tokeniser output only contained part of the data
-- 
cgit v1.2.3
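
For context, a minimal sketch of the pattern this patch restores in the test token handlers: when a character token carries more data than the expected string, the remainder is re-dispatched as a fresh token whose pointer and length are derived from the incoming token (rather than from the uninitialised local copy, which was the bug), and the recursive call's result is propagated back to the tokeniser. The type definitions below are simplified stand-ins and the expected_len parameter is hypothetical; the real handlers take the test context via pw and compare against the expected output string from the test file.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the hubbub types used by the test handler
 * (assumptions for illustration; the real definitions live in hubbub's
 * public headers). */
typedef enum { HUBBUB_OK } hubbub_error;
typedef enum { HUBBUB_TOKEN_CHARACTER } hubbub_token_type;

typedef struct {
	const uint8_t *ptr;
	size_t len;
} hubbub_string;

typedef struct {
	hubbub_token_type type;
	union {
		hubbub_string character;
	} data;
} hubbub_token;

/* Consume at most expected_len bytes of a character token; if the token
 * carries more data, re-dispatch the remainder as a fresh token, mirroring
 * the patched tester logic. */
static hubbub_error token_handler(const hubbub_token *token, size_t expected_len)
{
	size_t len = token->data.character.len < expected_len ?
			token->data.character.len : expected_len;

	printf("matched: %.*s\n", (int) len,
			(const char *) token->data.character.ptr);

	if (len < token->data.character.len) {
		hubbub_token t;

		t.type = HUBBUB_TOKEN_CHARACTER;
		/* Base the remainder on the incoming token, not on the
		 * uninitialised local copy, and propagate the recursive
		 * call's return value -- the two changes the patch makes. */
		t.data.character.ptr = token->data.character.ptr + len;
		t.data.character.len = token->data.character.len - len;

		return token_handler(&t, expected_len);
	}

	return HUBBUB_OK;
}

int main(void)
{
	const char *text = "abcdef";
	hubbub_token tok;

	tok.type = HUBBUB_TOKEN_CHARACTER;
	tok.data.character.ptr = (const uint8_t *) text;
	tok.data.character.len = strlen(text);

	/* Splits "abcdef" into three two-byte character tokens. */
	return token_handler(&tok, 2) == HUBBUB_OK ? 0 : 1;
}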