Class  | Html5TokenizerTestCase
In     | lib/feed_tools/vendor/html5/tests/test_tokenizer.rb
Parent | Test::Unit::TestCase
# File lib/feed_tools/vendor/html5/tests/test_tokenizer.rb, line 9
def assert_tokens_match(expectedTokens, receivedTokens, ignoreErrorOrder, message)
  if !ignoreErrorOrder
    # Compare in order; a mismatch fails the assertion
    assert_equal expectedTokens, receivedTokens, message
  else
    # Sort the tokens into two groups: non-parse errors and parse errors
    expected = [[], []]
    received = [[], []]

    for token in expectedTokens
      if token != "ParseError"
        expected[0] << token
      else
        expected[1] << token
      end
    end

    for token in receivedTokens
      if token != "ParseError"
        received[0] << token
      else
        received[1] << token
      end
    end
    assert_equal expected, received, message
  end
end
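A short illustration of the ignoreErrorOrder path may help. This is a hedged sketch: the token shapes (character tokens as ["Character", text], parse errors as the bare string "ParseError") are assumptions inferred from the method body, not taken from the test fixtures.

  expected = [["Character", "a"], "ParseError", ["Character", "b"]]
  received = ["ParseError", ["Character", "a"], ["Character", "b"]]

  # With ignoreErrorOrder set, both sides partition into
  #   [[["Character", "a"], ["Character", "b"]], ["ParseError"]]
  # so the comparison passes even though the "ParseError" moved.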
# File lib/feed_tools/vendor/html5/tests/test_tokenizer.rb, line 47
def concatenate_consecutive_characters(tokens)
  tokens.inject([]) do |result, token|
    # Merge this token into the previous one when both are Character tokens
    if type_of?('Character', token) and result.any? and type_of?('Character', result.last)
      result.last[1] = result.last[1] + token[1]
      next result
    end
    result << token
  end
end
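For illustration, a minimal sketch of the merging behaviour, assuming type_of? tests a token's type tag (its first element) and that character tokens look like ["Character", text]:

  tokens = [["Character", "fo"], ["Character", "o"],
            ["StartTag", "b", []], ["Character", "!"]]
  concatenate_consecutive_characters(tokens)
  # => [["Character", "foo"], ["StartTag", "b", []], ["Character", "!"]]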
# File lib/feed_tools/vendor/html5/tests/test_tokenizer.rb, line 40
def convert_attribute_arrays_to_hashes(tokens)
  tokens.inject([]) do |result, token|
    # StartTag tokens carry attributes as [name, value] pairs; convert them to
    # a Hash. Reversing first means the first occurrence of a duplicate
    # attribute wins, since it overwrites the later ones.
    token[2] = Hash[*token[2].reverse.flatten] if type_of?('StartTag', token)
    result << token
  end
end
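A hedged example of the conversion, assuming StartTag tokens carry their attributes as an array of [name, value] pairs in position 2:

  token = ["StartTag", "a", [["href", "/x"], ["href", "/y"], ["class", "c"]]]
  convert_attribute_arrays_to_hashes([token])
  # => [["StartTag", "a", {"class" => "c", "href" => "/x"}]]
  # The duplicate href resolves to its first value, "/x", because the
  # pairs are reversed before being splatted into Hash[].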
# File lib/feed_tools/vendor/html5/tests/test_tokenizer.rb, line 57
def tokenizer_test(data)
  (data['contentModelFlags'] || [:PCDATA]).each do |content_model_flag|
    message = [
      '', 'Description:', data['description'],
      '', 'Input:', data['input'],
      '', 'Content Model Flag:', content_model_flag,
      '' ] * "\n"

    assert_nothing_raised message do
      tokenizer = HTML5::HTMLTokenizer.new(data['input'])

      tokenizer.content_model_flag = content_model_flag.to_sym

      # Seed the tokenizer with a last start tag when the fixture supplies one
      tokenizer.current_token = {:type => :startTag, :name => data['lastStartTag']} if data.has_key?('lastStartTag')

      tokens = TokenizerTestParser.new(tokenizer).parse

      actual = concatenate_consecutive_characters(convert_attribute_arrays_to_hashes(tokens))

      expected = concatenate_consecutive_characters(data['output'])

      assert_tokens_match expected, actual, data["ignoreErrorOrder"], message
    end
  end
end
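The fixture keys read here ('description', 'input', 'output', 'contentModelFlags', plus the optional 'lastStartTag' and 'ignoreErrorOrder') suggest a hash-driven test case. A minimal sketch of such a fixture follows; the token shape in 'output' is an assumption, chosen so the expected side already carries attribute hashes (only the actual tokens pass through convert_attribute_arrays_to_hashes):

  data = {
    'description'       => 'start tag with one attribute',
    'input'             => '<h a="b">',
    'output'            => [["StartTag", "h", {"a" => "b"}]],  # assumed shape
    'contentModelFlags' => [:PCDATA]
  }
  tokenizer_test(data)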