  1. """
  2. Run chardet on a bunch of documents and see that we get the correct encodings.
  3. :author: Dan Blanchard
  4. :author: Ian Cordasco
  5. """

from __future__ import with_statement

import textwrap
from difflib import ndiff
from io import open
from os import listdir
from os.path import dirname, isdir, join, realpath, relpath, splitext

try:
    import hypothesis.strategies as st
    from hypothesis import given, assume, settings, Verbosity
    HAVE_HYPOTHESIS = True
except ImportError:
    HAVE_HYPOTHESIS = False
import pytest

import chardet

# TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250) after we
#       retrain model.
MISSING_ENCODINGS = set(['iso-8859-2', 'iso-8859-6', 'windows-1250',
                         'windows-1254', 'windows-1256'])
EXPECTED_FAILURES = set(['tests/iso-8859-7-greek/disabled.gr.xml',
                         'tests/iso-8859-9-turkish/divxplanet.com.xml',
                         'tests/iso-8859-9-turkish/subtitle.srt',
                         'tests/iso-8859-9-turkish/wikitop_tr_ISO-8859-9.txt'])


def gen_test_params():
    """Yields tuples of paths and encodings to use for test_encoding_detection"""
    base_path = relpath(join(dirname(realpath(__file__)), 'tests'))
    for encoding in listdir(base_path):
        path = join(base_path, encoding)
        # Skip regular files in the tests directory; only encoding
        # subdirectories matter here
        if not isdir(path):
            continue
        # Remove language suffixes from encoding if present
        encoding = encoding.lower()
        for postfix in ['-arabic', '-bulgarian', '-cyrillic', '-greek',
                        '-hebrew', '-hungarian', '-turkish']:
            if encoding.endswith(postfix):
                encoding = encoding.rpartition(postfix)[0]
                break
        # Skip directories for encodings we don't handle yet.
        if encoding in MISSING_ENCODINGS:
            continue
        # Yield a test case for each sample file we have for this encoding
        for file_name in listdir(path):
            ext = splitext(file_name)[1].lower()
            if ext not in ['.html', '.txt', '.xml', '.srt']:
                continue
            full_path = join(path, file_name)
            test_case = full_path, encoding
            if full_path in EXPECTED_FAILURES:
                test_case = pytest.mark.xfail(test_case)
            yield test_case


@pytest.mark.parametrize('file_name, encoding', gen_test_params())
def test_encoding_detection(file_name, encoding):
    with open(file_name, 'rb') as f:
        input_bytes = f.read()
    result = chardet.detect(input_bytes)
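    # chardet.detect returns a dict like
    # {'encoding': 'utf-8', 'confidence': 0.99}; 'encoding' is None when
    # detection fails entirely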
    try:
        expected_unicode = input_bytes.decode(encoding)
    except LookupError:
        expected_unicode = ''
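    # result['encoding'] may be None, in which case .decode() raises TypeError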
    try:
        detected_unicode = input_bytes.decode(result['encoding'])
    except (LookupError, UnicodeDecodeError, TypeError):
        detected_unicode = ''
    if result:
        encoding_match = (result['encoding'] or '').lower() == encoding
    else:
        encoding_match = False
    # Only care about mismatches that would actually result in different
    # behavior when decoding
    if not encoding_match and expected_unicode != detected_unicode:
        wrapped_expected = '\n'.join(textwrap.wrap(expected_unicode, 100)) + '\n'
        wrapped_detected = '\n'.join(textwrap.wrap(detected_unicode, 100)) + '\n'
        diff = ''.join(ndiff(wrapped_expected.splitlines(True),
                             wrapped_detected.splitlines(True)))
    else:
        diff = ''
        encoding_match = True
    assert encoding_match, ("Expected %s, but got %s for %s. Character "
                            "differences: \n%s" % (encoding, result,
                                                   file_name, diff))


if HAVE_HYPOTHESIS:
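    # Raised when detection failed only because the input was too short,
    # not because the encoding is genuinely undetectable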
    class JustALengthIssue(Exception):
        pass

    @pytest.mark.xfail
    @given(st.text(min_size=1),
           st.sampled_from(['ascii', 'utf-8', 'utf-16', 'utf-32',
                            'iso-8859-7', 'iso-8859-8', 'windows-1255']),
           st.randoms())
    @settings(max_examples=200)
    def test_never_fails_to_detect_if_there_is_a_valid_encoding(txt, enc, rnd):
        try:
            data = txt.encode(enc)
        except UnicodeEncodeError:
            assume(False)
        detected = chardet.detect(data)['encoding']
        if detected is None:
            with pytest.raises(JustALengthIssue):
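                # Nested Hypothesis search: if some longer input that starts
                # with txt *is* detected, the original failure was just a
                # length issue, and raising JustALengthIssue satisfies the
                # pytest.raises above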
                @given(st.text(), random=rnd)
                @settings(verbosity=Verbosity.quiet, max_shrinks=0,
                          max_examples=50)
                def string_poisons_following_text(suffix):
                    try:
                        extended = (txt + suffix).encode(enc)
                    except UnicodeEncodeError:
                        assume(False)
                    result = chardet.detect(extended)
                    if result and result['encoding'] is not None:
                        raise JustALengthIssue()

                # The @given-decorated inner test only runs when called
                string_poisons_following_text()