10 Commits

Author              SHA1        Message                                                 Date
Maxim Vladimirskiy  14f106ee76  Operate on unicode data exclusively                     2022-02-04 17:31:53 +03:00
Maxim Vladimirskiy  a8c7e6a972  Merge pull request #226 from mailgun/maxim/develop      2022-01-06 15:24:57 +03:00
                                (PIP-1562: Remove max tags limit [python3])
Maxim Vladimirskiy  b30c375c5b  Expose extract_from_html_tree                           2022-01-06 15:16:43 +03:00
Maxim Vladimirskiy  cec5acf58f  Remove max tags limit                                   2022-01-06 14:18:11 +03:00
Maxim Vladimirskiy  24d0f2d00a  Merge pull request #223 from mailgun/maxim/develop      2021-11-19 13:11:29 +03:00
                                (PIP-1509: Optimise sender name check [python3])
Maxim Vladimirskiy  94007b0b92  Optimise sender name check                              2021-11-19 11:12:26 +03:00
Maxim Vladimirskiy  1a5548f171  Merge pull request #222 from mailgun/maxim/develop      2021-11-11 16:29:30 +03:00
                                (PIP-1409: Remove version pins from setup.py [python3])
Maxim Vladimirskiy  53c49b9121  Remove version pins from setup.py                       2021-11-11 15:36:50 +03:00
Matt Dietz          bd50872043  Merge pull request #217 from mailgun/dietz/REP-1030     2021-06-15 09:46:29 -05:00
                                (Drops Python 2 support [python3])
Matt Dietz          d37c4fd551  Drops Python 2 support                                  2021-06-10 14:03:25 -05:00

    REP-1030

    In addition to some Python 2 => 3 fixes, this change bumps the scikit-learn
    version to latest. The previously pinned version of scikit-learn failed to
    compile its C modules under Python 3.7+ because its bundled header files were
    incompatible with the C API implemented in Python 3.7+.

    Given the restrictive compatibility range supported by scikit-learn, it seemed
    prudent to drop Python 2 support altogether; otherwise we would be stuck with
    Python 3.4 as the newest version we could support.

    With this change, tests are passing under 3.9.2.

    Lastly, this imports the original training data. At some point, a new version
    of the training data was committed to the repo, but no classifier was trained
    from it, and a classifier trained from that new data made most of the tests
    fail.
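
Since the commit above hinges on retraining the classifier, here is a hedged
sketch of how the bundled model is regenerated from the committed training
data. The train()/init() helpers come from talon/signature/learning/classifier.py
(diffed further down); their exact signatures and the data paths are assumptions,
not part of this changeset.

    # Sketch: rebuild talon's signature classifier from train.data.
    # Assumes classifier.init() returns a fresh LinearSVC and
    # classifier.train(clf, train_data_path, save_path) fits and joblib-dumps it.
    import os

    from talon.signature.learning import classifier

    data_dir = os.path.join("talon", "signature", "data")  # illustrative path

    classifier.train(
        classifier.init(),
        os.path.join(data_dir, "train.data"),   # CSV of feature vectors + labels
        os.path.join(data_dir, "classifier"),   # model file loaded by initialize()
    )
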
16 changed files with 2774 additions and 2783 deletions

.build/Dockerfile (new file, 20 lines)

@@ -0,0 +1,20 @@
FROM python:3.9-slim-buster AS deps
RUN apt-get update && \
apt-get install -y build-essential git curl python3-dev libatlas3-base libatlas-base-dev liblapack-dev libxml2 libxml2-dev libffi6 libffi-dev musl-dev libxslt-dev
FROM deps AS testable
ARG REPORT_PATH
VOLUME ["/var/mailgun", "/etc/mailgun/ssl", ${REPORT_PATH}]
ADD . /app
WORKDIR /app
COPY wheel/* /wheel/
RUN mkdir -p ${REPORT_PATH}
RUN python ./setup.py build bdist_wheel -d /wheel && \
pip install --no-deps /wheel/*
ENTRYPOINT ["/bin/sh", "/app/run_tests.sh"]

.gitignore (vendored, 3 lines changed)

@@ -54,3 +54,6 @@ _trial_temp
# OSX
.DS_Store
# vim-backup
*.bak

requirements.txt (new file, 11 lines)

@@ -0,0 +1,11 @@
chardet>=1.0.1
cchardet>=0.3.5
cssselect
html5lib
joblib
lxml>=2.3.3
numpy
regex>=1
scikit-learn>=1.0.0
scipy
six>=1.10.0

run_tests.sh (new executable file, 4 lines)

@@ -0,0 +1,4 @@
#!/usr/bin/env bash
set -ex
REPORT_PATH="${REPORT_PATH:-./}"
nosetests --with-xunit --with-coverage --cover-xml --cover-xml-file $REPORT_PATH/coverage.xml --xunit-file=$REPORT_PATH/nosetests.xml --cover-package=talon .

setup.py

@@ -19,17 +19,17 @@ class InstallCommand(install):
if self.no_ml:
dist = self.distribution
dist.packages=find_packages(exclude=[
'tests',
'tests.*',
'talon.signature',
'talon.signature.*',
"tests",
"tests.*",
"talon.signature",
"talon.signature.*",
])
for not_required in ['numpy', 'scipy', 'scikit-learn==0.16.1']:
for not_required in ["numpy", "scipy", "scikit-learn==0.24.1"]:
dist.install_requires.remove(not_required)
setup(name='talon',
version='1.4.8',
version='1.6.0',
description=("Mailgun library "
"to extract message quotations and signatures."),
long_description=open("README.rst").read(),
@@ -44,20 +44,21 @@ setup(name='talon',
include_package_data=True,
zip_safe=True,
install_requires=[
"lxml>=2.3.3",
"regex>=1",
"lxml",
"regex",
"numpy",
"scipy",
"scikit-learn==0.16.1", # pickled versions of classifier, else rebuild
'chardet>=1.0.1',
'cchardet>=0.3.5',
'cssselect',
'six>=1.10.0',
'html5lib'
"scikit-learn>=1.0.0",
"chardet",
"cchardet",
"cssselect",
"six",
"html5lib",
"joblib",
],
tests_require=[
"mock",
"nose>=1.2.1",
"nose",
"coverage"
]
)

talon/quotations.py

@@ -6,18 +6,17 @@ original messages (without quoted messages)
"""
from __future__ import absolute_import
import regex as re
import logging
from copy import deepcopy
from lxml import html, etree
from talon.utils import (get_delimiter, html_tree_to_text,
html_document_fromstring)
from talon import html_quotations
import regex as re
from lxml import etree, html
from six.moves import range
import six
from talon import html_quotations
from talon.utils import (get_delimiter, html_document_fromstring,
html_tree_to_text)
log = logging.getLogger(__name__)
@@ -94,7 +93,7 @@ RE_ON_DATE_WROTE_SMB = re.compile(
)
RE_QUOTATION = re.compile(
r'''
r"""
(
# quotation border: splitter line or a number of quotation marker lines
(?:
@@ -112,10 +111,10 @@ RE_QUOTATION = re.compile(
# after quotations should be text only or nothing at all
[te]*$
''', re.VERBOSE)
""", re.VERBOSE)
RE_EMPTY_QUOTATION = re.compile(
r'''
r"""
(
# quotation border: splitter line or a number of quotation marker lines
(?:
@@ -125,7 +124,7 @@ RE_EMPTY_QUOTATION = re.compile(
)
)
e*
''', re.VERBOSE)
""", re.VERBOSE)
# ------Original Message------ or ---- Reply Message ----
# With variations in other languages.
@@ -193,9 +192,6 @@ RE_PARENTHESIS_LINK = re.compile("\(https?://")
SPLITTER_MAX_LINES = 6
MAX_LINES_COUNT = 1000
# an extensive research shows that exceeding this limit
# leads to excessive processing time
MAX_HTML_LEN = 2794202
QUOT_PATTERN = re.compile('^>+ ?')
NO_QUOT_LINE = re.compile('^[^>].*[\S].*')
@@ -346,9 +342,6 @@ def _replace_link_brackets(msg_body):
Converts msg_body into a unicode
"""
if isinstance(msg_body, bytes):
msg_body = msg_body.decode('utf8')
def link_wrapper(link):
newline_index = msg_body[:link.start()].rfind("\n")
if msg_body[newline_index + 1] == ">":
@@ -388,8 +381,6 @@ def postprocess(msg_body):
def extract_from_plain(msg_body):
"""Extracts a non quoted message from provided plain text."""
stripped_text = msg_body
delimiter = get_delimiter(msg_body)
msg_body = preprocess(msg_body, delimiter)
# don't process too long messages
@@ -421,25 +412,27 @@ def extract_from_html(msg_body):
Returns a unicode string.
"""
if isinstance(msg_body, six.text_type):
msg_body = msg_body.encode('utf8')
elif not isinstance(msg_body, bytes):
msg_body = msg_body.encode('ascii')
if msg_body.strip() == "":
return msg_body
result = _extract_from_html(msg_body)
if isinstance(result, bytes):
result = result.decode('utf8')
msg_body = msg_body.replace("\r\n", "\n")
# Cut out xml and doctype tags to avoid conflict with unicode decoding.
msg_body = re.sub(r"\<\?xml.+\?\>|\<\!DOCTYPE.+]\>", "", msg_body)
html_tree = html_document_fromstring(msg_body)
if html_tree is None:
return msg_body
result = extract_from_html_tree(html_tree)
if not result:
return msg_body
return result
def _extract_from_html(msg_body):
def extract_from_html_tree(html_tree):
"""
Extract not quoted message from provided html message body
using tags and plain text algorithm.
Cut out first some encoding html tags such as xml and doctype
for avoiding conflict with unicode decoding
Extract not quoted message from provided parsed html tree using tags and
plain text algorithm.
Cut out the 'blockquote', 'gmail_quote' tags.
Cut Microsoft quotations.
@@ -452,18 +445,6 @@ def _extract_from_html(msg_body):
then checking deleted checkpoints,
then deleting necessary tags.
"""
if msg_body.strip() == b'':
return msg_body
msg_body = msg_body.replace(b'\r\n', b'\n')
msg_body = re.sub(r"\<\?xml.+\?\>|\<\!DOCTYPE.+]\>", "", msg_body)
html_tree = html_document_fromstring(msg_body)
if html_tree is None:
return msg_body
cut_quotations = (html_quotations.cut_gmail_quote(html_tree) or
html_quotations.cut_zimbra_quote(html_tree) or
html_quotations.cut_blockquote(html_tree) or
@@ -481,7 +462,7 @@ def _extract_from_html(msg_body):
# Don't process too long messages
if len(lines) > MAX_LINES_COUNT:
return msg_body
return None
# Collect checkpoints on each line
line_checkpoints = [
@@ -500,7 +481,7 @@ def _extract_from_html(msg_body):
lines_were_deleted, first_deleted, last_deleted = return_flags
if not lines_were_deleted and not cut_quotations:
return msg_body
return None
if lines_were_deleted:
#collect checkpoints from deleted lines
@@ -514,7 +495,7 @@ def _extract_from_html(msg_body):
)
if _readable_text_empty(html_tree_copy):
return msg_body
return None
# NOTE: We remove_namespaces() because we are using an HTML5 Parser, HTML
# parsers do not recognize namespaces in HTML tags. As such the rendered
@@ -540,7 +521,11 @@ def _extract_from_html(msg_body):
# of replacing data outside the <tag> which might be essential to
# the customer.
remove_namespaces(html_tree_copy)
return html.tostring(html_tree_copy)
s = html.tostring(html_tree_copy, encoding="ascii")
if not s:
return None
return s.decode("ascii")
def remove_namespaces(root):
@@ -659,10 +644,10 @@ def _readable_text_empty(html_tree):
def is_splitter(line):
'''
"""
Returns Matcher object if provided string is a splitter and
None otherwise.
'''
"""
for pattern in SPLITTER_PATTERNS:
matcher = re.match(pattern, line)
if matcher:
@@ -670,12 +655,12 @@ def is_splitter(line):
def text_content(context):
'''XPath Extension function to return a node text content.'''
"""XPath Extension function to return a node text content."""
return context.context_node.xpath("string()").strip()
def tail(context):
'''XPath Extension function to return a node tail text.'''
"""XPath Extension function to return a node tail text."""
return context.context_node.tail or ''
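
With extract_from_html_tree now public (commit b30c375c5b), callers that already
hold a parsed lxml tree can skip the string round-trip that extract_from_html
performs. A minimal usage sketch; the sample HTML and the fallback-to-original
pattern are illustrative:

    import talon
    from talon import quotations
    from talon.utils import html_document_fromstring

    talon.init()  # registers the XPath extensions the cut rules rely on

    msg = "<html><body>Reply<blockquote>On Mon, Bob wrote: hi</blockquote></body></html>"

    tree = html_document_fromstring(msg)
    if tree is not None:
        reply = quotations.extract_from_html_tree(tree)
        # None means nothing was cut; fall back to the original body.
        print(reply if reply else msg)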

talon/signature/__init__.py

@@ -23,17 +23,14 @@ trained against, don't forget to regenerate:
from __future__ import absolute_import
import os
from . import extraction
from . extraction import extract #noqa
from . learning import classifier
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
EXTRACTOR_FILENAME = os.path.join(DATA_DIR, 'classifier')
EXTRACTOR_DATA = os.path.join(DATA_DIR, 'train.data')
from talon.signature import extraction
from talon.signature.extraction import extract
from talon.signature.learning import classifier
def initialize():
extraction.EXTRACTOR = classifier.load(EXTRACTOR_FILENAME,
EXTRACTOR_DATA)
data_dir = os.path.join(os.path.dirname(__file__), 'data')
extractor_filename = os.path.join(data_dir, 'classifier')
extractor_data_filename = os.path.join(data_dir, 'train.data')
extraction.EXTRACTOR = classifier.load(extractor_filename,
extractor_data_filename)
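
initialize() still wires the trained model into extraction.EXTRACTOR; only the
path constants became local variables, so typical use is unchanged. A sketch,
with an illustrative message and sender (expected outputs follow the library's
documented example, not this diff):

    import talon
    from talon import signature

    talon.init()  # calls signature.initialize() as part of setup

    body = "Wow. Awesome!\n--\nBob Smith"
    text, sig = signature.extract(body, sender="bob@example.com")
    # Expect roughly: text == "Wow. Awesome!", sig == "--\nBob Smith"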

Binary file not shown.

File diff suppressed because it is too large

talon/signature/learning/classifier.py

@@ -8,7 +8,7 @@ body belongs to the signature.
from __future__ import absolute_import
from numpy import genfromtxt
from sklearn.externals import joblib
import joblib
from sklearn.svm import LinearSVC

talon/signature/learning/features.py

@@ -5,21 +5,17 @@
* regexp's constants used when evaluating signature's features
"""
from __future__ import absolute_import
import unicodedata
import regex as re
from talon.utils import to_unicode
from talon.signature.constants import SIGNATURE_MAX_LINES
rc = re.compile
RE_EMAIL = rc('\S@\S')
RE_RELAX_PHONE = rc('(\(? ?[\d]{2,3} ?\)?.{,3}?){2,}')
RE_URL = rc(r'''https?://|www\.[\S]+\.[\S]''')
RE_URL = rc(r"""https?://|www\.[\S]+\.[\S]""")
# Taken from:
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
@@ -55,7 +51,7 @@ BAD_SENDER_NAMES = [
def binary_regex_search(prog):
'''Returns a function that returns 1 or 0 depending on regex search result.
"""Returns a function that returns 1 or 0 depending on regex search result.
If regular expression compiled into prog is present in a string
the result of calling the returned function with the string will be 1
@@ -66,12 +62,12 @@ def binary_regex_search(prog):
1
>>> binary_regex_search(re.compile("12"))("34")
0
'''
"""
return lambda s: 1 if prog.search(s) else 0
def binary_regex_match(prog):
'''Returns a function that returns 1 or 0 depending on regex match result.
"""Returns a function that returns 1 or 0 depending on regex match result.
If a string matches regular expression compiled into prog
the result of calling the returned function with the string will be 1
@@ -82,7 +78,7 @@ def binary_regex_match(prog):
1
>>> binary_regex_match(re.compile("12"))("3 12")
0
'''
"""
return lambda s: 1 if prog.match(s) else 0
@@ -102,7 +98,7 @@ def flatten_list(list_to_flatten):
def contains_sender_names(sender):
'''Returns a functions to search sender\'s name or it\'s part.
"""Returns a functions to search sender\'s name or it\'s part.
>>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
>>> feature("Sergey Obukhov")
@@ -115,7 +111,7 @@ def contains_sender_names(sender):
1
>>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
1
'''
"""
names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
for e in extract_names(sender)]))
names = names or sender
@@ -135,20 +131,25 @@ def extract_names(sender):
>>> extract_names('')
[]
"""
sender = to_unicode(sender, precise=True)
# Remove non-alphabetical characters
sender = "".join([char if char.isalpha() else ' ' for char in sender])
# Remove too short words and words from "black" list i.e.
# words like `ru`, `gmail`, `com`, `org`, etc.
sender = [word for word in sender.split() if len(word) > 1 and
not word in BAD_SENDER_NAMES]
# Remove duplicates
names = list(set(sender))
names = list()
for word in sender.split():
if len(word) < 2:
continue
if word in BAD_SENDER_NAMES:
continue
if word in names:
continue
names.append(word)
return names
def categories_percent(s, categories):
'''Returns category characters percent.
"""Returns category characters percent.
>>> categories_percent("qqq ggg hhh", ["Po"])
0.0
@@ -160,9 +161,8 @@ def categories_percent(s, categories):
50.0
>>> categories_percent("s.s,5s", ["Po", "Nd"])
50.0
'''
"""
count = 0
s = to_unicode(s, precise=True)
for c in s:
if unicodedata.category(c) in categories:
count += 1
@@ -170,19 +170,18 @@ def categories_percent(s, categories):
def punctuation_percent(s):
'''Returns punctuation percent.
"""Returns punctuation percent.
>>> punctuation_percent("qqq ggg hhh")
0.0
>>> punctuation_percent("q,w.")
50.0
'''
"""
return categories_percent(s, ['Po'])
def capitalized_words_percent(s):
'''Returns capitalized words percent.'''
s = to_unicode(s, precise=True)
"""Returns capitalized words percent."""
words = re.split('\s', s)
words = [w for w in words if w.strip()]
words = [w for w in words if len(w) > 2]
@@ -208,20 +207,26 @@ def many_capitalized_words(s):
def has_signature(body, sender):
'''Checks if the body has signature. Returns True or False.'''
"""Checks if the body has signature. Returns True or False."""
non_empty = [line for line in body.splitlines() if line.strip()]
candidate = non_empty[-SIGNATURE_MAX_LINES:]
upvotes = 0
sender_check = contains_sender_names(sender)
for line in candidate:
# we check lines for sender's name, phone, email and url,
# those signature lines don't take more then 27 lines
if len(line.strip()) > 27:
continue
elif contains_sender_names(sender)(line):
if sender_check(line):
return True
elif (binary_regex_search(RE_RELAX_PHONE)(line) +
binary_regex_search(RE_EMAIL)(line) +
binary_regex_search(RE_URL)(line) == 1):
if (binary_regex_search(RE_RELAX_PHONE)(line) +
binary_regex_search(RE_EMAIL)(line) +
binary_regex_search(RE_URL)(line) == 1):
upvotes += 1
if upvotes > 1:
return True
return False
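
The optimisation in has_signature hoists contains_sender_names(sender), which
builds and compiles the name-matching regex, out of the per-line loop, so the
compilation happens once per message rather than once per candidate line. An
illustrative sketch; expected outputs follow the doctests above:

    from talon.signature.learning.features import contains_sender_names

    # Build the checker once per message, as the optimised loop now does...
    check = contains_sender_names("Sergey N. Obukhov <serobnic@mail.ru>")

    # ...then apply the cheap compiled check to each line.
    for line in ["Thanks!", "Sergey Obukhov", "BR, Sergey N."]:
        print(line, check(line))  # prints 0, then 1, then 1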

talon/utils.py

@@ -1,110 +1,17 @@
# coding:utf-8
from __future__ import annotations
from __future__ import absolute_import
from random import shuffle
import cchardet
import chardet
import html5lib
import regex as re
import six
from html5lib import HTMLParser
from lxml.cssselect import CSSSelector
from lxml.etree import _Element
from lxml.html import html5parser
from talon.constants import RE_DELIMITER
def safe_format(format_string, *args, **kwargs):
"""
Helper: formats string with any combination of bytestrings/unicode
strings without raising exceptions
"""
try:
if not args and not kwargs:
return format_string
else:
return format_string.format(*args, **kwargs)
# catch encoding errors and transform everything into utf-8 string
# before logging:
except (UnicodeEncodeError, UnicodeDecodeError):
format_string = to_utf8(format_string)
args = [to_utf8(p) for p in args]
kwargs = {k: to_utf8(v) for k, v in six.iteritems(kwargs)}
return format_string.format(*args, **kwargs)
# ignore other errors
except:
return u''
def to_unicode(str_or_unicode, precise=False):
"""
Safely returns a unicode version of a given string
>>> utils.to_unicode('привет')
u'привет'
>>> utils.to_unicode(u'привет')
u'привет'
If `precise` flag is True, tries to guess the correct encoding first.
"""
if not isinstance(str_or_unicode, six.text_type):
encoding = quick_detect_encoding(str_or_unicode) if precise else 'utf-8'
return six.text_type(str_or_unicode, encoding, 'replace')
return str_or_unicode
def detect_encoding(string):
"""
Tries to detect the encoding of the passed string.
Defaults to UTF-8.
"""
assert isinstance(string, bytes)
try:
detected = chardet.detect(string)
if detected:
return detected.get('encoding') or 'utf-8'
except Exception as e:
pass
return 'utf-8'
def quick_detect_encoding(string):
"""
Tries to detect the encoding of the passed string.
Uses cchardet. Fallbacks to detect_encoding.
"""
assert isinstance(string, bytes)
try:
detected = cchardet.detect(string)
if detected:
return detected.get('encoding') or detect_encoding(string)
except Exception as e:
pass
return detect_encoding(string)
def to_utf8(str_or_unicode):
"""
Safely returns a UTF-8 version of a given string
>>> utils.to_utf8(u'hi')
'hi'
"""
if not isinstance(str_or_unicode, six.text_type):
return str_or_unicode.encode("utf-8", "ignore")
return str(str_or_unicode)
def random_token(length=7):
vals = ("a b c d e f g h i j k l m n o p q r s t u v w x y z "
"0 1 2 3 4 5 6 7 8 9").split(' ')
shuffle(vals)
return ''.join(vals[:length])
def get_delimiter(msg_body):
def get_delimiter(msg_body: str) -> str:
delimiter = RE_DELIMITER.search(msg_body)
if delimiter:
delimiter = delimiter.group()
@@ -114,7 +21,7 @@ def get_delimiter(msg_body):
return delimiter
def html_tree_to_text(tree):
def html_tree_to_text(tree: _Element) -> str:
for style in CSSSelector('style')(tree):
style.getparent().remove(style)
@@ -146,26 +53,22 @@ def html_tree_to_text(tree):
not text.endswith("\n") and not el_text):
text += "\n"
retval = _rm_excessive_newlines(text)
return _encode_utf8(retval)
text = _rm_excessive_newlines(text)
return text
def html_to_text(string):
def html_to_text(s: str) -> str | None:
"""
Dead-simple HTML-to-text converter:
>>> html_to_text("one<br>two<br>three")
>>> "one\ntwo\nthree"
<<< "one\ntwo\nthree"
NOTES:
1. the string is expected to contain UTF-8 encoded HTML!
2. returns utf-8 encoded str (not unicode)
3. if html can't be parsed returns None
"""
if isinstance(string, six.text_type):
string = string.encode('utf8')
s = _prepend_utf8_declaration(string)
s = s.replace(b"\n", b"")
s = _prepend_utf8_declaration(s)
s = s.replace("\n", "")
tree = html_fromstring(s)
if tree is None:
@@ -174,74 +77,46 @@ def html_to_text(string):
return html_tree_to_text(tree)
def html_fromstring(s):
def html_fromstring(s: str) -> _Element:
"""Parse html tree from string. Return None if the string can't be parsed.
"""
if isinstance(s, six.text_type):
s = s.encode('utf8')
try:
if html_too_big(s):
return None
return html5parser.fromstring(s, parser=_html5lib_parser())
except Exception:
pass
return html5parser.fromstring(s, parser=_html5lib_parser())
def html_document_fromstring(s):
def html_document_fromstring(s: str) -> _Element:
"""Parse html tree from string. Return None if the string can't be parsed.
"""
if isinstance(s, six.text_type):
s = s.encode('utf8')
try:
if html_too_big(s):
return None
return html5parser.document_fromstring(s, parser=_html5lib_parser())
except Exception:
pass
return html5parser.document_fromstring(s, parser=_html5lib_parser())
def cssselect(expr, tree):
def cssselect(expr: str, tree: str) -> list[_Element]:
return CSSSelector(expr)(tree)
def html_too_big(s):
if isinstance(s, six.text_type):
s = s.encode('utf8')
return s.count(b'<') > _MAX_TAGS_COUNT
def _contains_charset_spec(s):
def _contains_charset_spec(s: str) -> str:
"""Return True if the first 4KB contain charset spec
"""
return s.lower().find(b'html; charset=', 0, 4096) != -1
return s.lower().find('html; charset=', 0, 4096) != -1
def _prepend_utf8_declaration(s):
def _prepend_utf8_declaration(s: str) -> str:
"""Prepend 'utf-8' encoding declaration if the first 4KB don't have any
"""
return s if _contains_charset_spec(s) else _UTF8_DECLARATION + s
def _rm_excessive_newlines(s):
def _rm_excessive_newlines(s: str) -> str:
"""Remove excessive newlines that often happen due to tons of divs
"""
return _RE_EXCESSIVE_NEWLINES.sub("\n\n", s).strip()
def _encode_utf8(s):
"""Encode in 'utf-8' if unicode
"""
return s.encode('utf-8') if isinstance(s, six.text_type) else s
def _html5lib_parser():
def _html5lib_parser() -> HTMLParser:
"""
html5lib is a pure-python library that conforms to the WHATWG HTML spec
and is not vulnarable to certain attacks common for XML libraries
"""
return html5lib.HTMLParser(
return HTMLParser(
# build lxml tree
html5lib.treebuilders.getTreeBuilder("lxml"),
# remove namespace value from inside lxml.html.html5paser element tag
@@ -251,14 +126,10 @@ def _html5lib_parser():
)
_UTF8_DECLARATION = (b'<meta http-equiv="Content-Type" content="text/html;'
b'charset=utf-8">')
_UTF8_DECLARATION = ('<meta http-equiv="Content-Type" content="text/html;'
'charset=utf-8">')
_BLOCKTAGS = ['div', 'p', 'ul', 'li', 'h1', 'h2', 'h3']
_HARDBREAKS = ['br', 'hr', 'tr']
_RE_EXCESSIVE_NEWLINES = re.compile("\n{2,10}")
# an extensive research shows that exceeding this limit
# might lead to excessive processing time
_MAX_TAGS_COUNT = 419
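
After this rewrite, talon.utils accepts and returns str throughout; the bytes
paths (to_unicode, to_utf8, encoding detection) are gone. The new contract in
brief, with expected values taken from the docstring above and the updated
tests below:

    from talon import utils as u

    assert u.html_to_text("one<br>two<br>three") == "one\ntwo\nthree"  # str, not bytes
    assert u.html_to_text("<b>привет!</b>") == "привет!"  # no .decode() needed
    assert u.html_to_text("<body><br/><br/>Hi</body>") == "Hi"
    assert u.get_delimiter("abc") == "\n"  # default delimiter when none found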

test-requirements.txt (new file, 3 lines)

@@ -0,0 +1,3 @@
coverage
mock
nose>=1.2.1

tests/html_quotations_test.py

@@ -4,14 +4,17 @@ from __future__ import absolute_import
# noinspection PyUnresolvedReferences
import re
from unittest.mock import Mock, patch
from nose.tools import assert_false, assert_true, eq_, ok_
from tests.fixtures import (OLK_SRC_BODY_SECTION,
REPLY_QUOTATIONS_SHARE_BLOCK,
REPLY_SEPARATED_BY_HR)
from talon import quotations, utils as u
from . import *
from .fixtures import *
from lxml import html
RE_WHITESPACE = re.compile("\s")
RE_DOUBLE_WHITESPACE = re.compile("\s")
RE_WHITESPACE = re.compile(r"\s")
RE_DOUBLE_WHITESPACE = re.compile(r"\s")
def test_quotation_splitter_inside_blockquote():
@@ -166,7 +169,7 @@ def test_unicode_in_reply():
<blockquote>
Quote
</blockquote>""".encode("utf-8")
</blockquote>"""
eq_("<html><head></head><body>Reply&#160;&#160;Text<br><div><br></div>"
"</body></html>",
@@ -314,7 +317,6 @@ def extract_reply_and_check(filename):
msg_body = f.read()
reply = quotations.extract_from_html(msg_body)
plain_reply = u.html_to_text(reply)
plain_reply = plain_reply.decode('utf8')
eq_(RE_WHITESPACE.sub('', "Hi. I am fine.\n\nThanks,\nAlex"),
RE_WHITESPACE.sub('', plain_reply))
@@ -391,18 +393,6 @@ def test_gmail_forwarded_msg():
eq_(RE_WHITESPACE.sub('', msg_body), RE_WHITESPACE.sub('', extracted))
@patch.object(u, '_MAX_TAGS_COUNT', 4)
def test_too_large_html():
msg_body = 'Reply' \
'<div class="gmail_quote">' \
'<div class="gmail_quote">On 11-Apr-2011, at 6:54 PM, Bob &lt;bob@example.com&gt; wrote:' \
'<div>Test</div>' \
'</div>' \
'</div>'
eq_(RE_WHITESPACE.sub('', msg_body),
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
def test_readable_html_empty():
msg_body = """
<blockquote>

tests/text_quotations_test.py

@@ -826,10 +826,10 @@ The user experience was unparallelled. Please continue production. I'm sending p
that this line is intact."""
parsed = quotations.extract_from_plain(msg_body)
eq_(msg_body, parsed.decode('utf8'))
eq_(msg_body, parsed)
def test_appointment():
def test_appointment_2():
msg_body = """Invitation for an interview:
Date: Wednesday 3, October 2011
@@ -838,4 +838,4 @@ Address: 130 Fox St
Please bring in your ID."""
parsed = quotations.extract_from_plain(msg_body)
eq_(msg_body, parsed.decode('utf8'))
eq_(msg_body, parsed)

tests/utils_test.py

@@ -2,9 +2,6 @@
from __future__ import absolute_import
import cchardet
import six
from talon import utils as u
from . import *
@@ -15,58 +12,6 @@ def test_get_delimiter():
eq_('\n', u.get_delimiter('abc'))
def test_unicode():
eq_(u'hi', u.to_unicode('hi'))
eq_(type(u.to_unicode('hi')), six.text_type)
eq_(type(u.to_unicode(u'hi')), six.text_type)
eq_(type(u.to_unicode('привет')), six.text_type)
eq_(type(u.to_unicode(u'привет')), six.text_type)
eq_(u"привет", u.to_unicode('привет'))
eq_(u"привет", u.to_unicode(u'привет'))
# some latin1 stuff
eq_(u"Versión", u.to_unicode(u'Versi\xf3n'.encode('iso-8859-2'), precise=True))
def test_detect_encoding():
eq_('ascii', u.detect_encoding(b'qwe').lower())
ok_(u.detect_encoding(
u'Versi\xf3n'.encode('iso-8859-2')).lower() in [
'iso-8859-1', 'iso-8859-2'])
eq_('utf-8', u.detect_encoding(u'привет'.encode('utf8')).lower())
# fallback to utf-8
with patch.object(u.chardet, 'detect') as detect:
detect.side_effect = Exception
eq_('utf-8', u.detect_encoding('qwe'.encode('utf8')).lower())
def test_quick_detect_encoding():
eq_('ascii', u.quick_detect_encoding(b'qwe').lower())
ok_(u.quick_detect_encoding(
u'Versi\xf3n'.encode('windows-1252')).lower() in [
'windows-1252', 'windows-1250'])
eq_('utf-8', u.quick_detect_encoding(u'привет'.encode('utf8')).lower())
@patch.object(cchardet, 'detect')
@patch.object(u, 'detect_encoding')
def test_quick_detect_encoding_edge_cases(detect_encoding, cchardet_detect):
cchardet_detect.return_value = {'encoding': 'ascii'}
eq_('ascii', u.quick_detect_encoding(b"qwe"))
cchardet_detect.assert_called_once_with(b"qwe")
# fallback to detect_encoding
cchardet_detect.return_value = {}
detect_encoding.return_value = 'utf-8'
eq_('utf-8', u.quick_detect_encoding(b"qwe"))
# exception
detect_encoding.reset_mock()
cchardet_detect.side_effect = Exception()
detect_encoding.return_value = 'utf-8'
eq_('utf-8', u.quick_detect_encoding(b"qwe"))
ok_(detect_encoding.called)
def test_html_to_text():
html = """<body>
<p>Hello world!</p>
@@ -80,11 +25,11 @@ Haha
</p>
</body>"""
text = u.html_to_text(html)
eq_(b"Hello world! \n\n * One! \n * Two \nHaha", text)
eq_(u"привет!", u.html_to_text("<b>привет!</b>").decode('utf8'))
eq_("Hello world! \n\n * One! \n * Two \nHaha", text)
eq_(u"привет!", u.html_to_text("<b>привет!</b>"))
html = '<body><br/><br/>Hi</body>'
eq_(b'Hi', u.html_to_text(html))
eq_('Hi', u.html_to_text(html))
html = """Hi
<style type="text/css">
@@ -104,60 +49,23 @@ font: 13px 'Lucida Grande', Arial, sans-serif;
}
</style>"""
eq_(b'Hi', u.html_to_text(html))
eq_('Hi', u.html_to_text(html))
html = """<div>
<!-- COMMENT 1 -->
<span>TEXT 1</span>
<p>TEXT 2 <!-- COMMENT 2 --></p>
</div>"""
eq_(b'TEXT 1 \nTEXT 2', u.html_to_text(html))
eq_('TEXT 1 \nTEXT 2', u.html_to_text(html))
def test_comment_no_parent():
s = b'<!-- COMMENT 1 --> no comment'
s = '<!-- COMMENT 1 --> no comment'
d = u.html_document_fromstring(s)
eq_(b"no comment", u.html_tree_to_text(d))
@patch.object(u.html5parser, 'fromstring', Mock(side_effect=Exception()))
def test_html_fromstring_exception():
eq_(None, u.html_fromstring("<html></html>"))
@patch.object(u, 'html_too_big', Mock())
@patch.object(u.html5parser, 'fromstring')
def test_html_fromstring_too_big(fromstring):
eq_(None, u.html_fromstring("<html></html>"))
assert_false(fromstring.called)
@patch.object(u.html5parser, 'document_fromstring')
def test_html_document_fromstring_exception(document_fromstring):
document_fromstring.side_effect = Exception()
eq_(None, u.html_document_fromstring("<html></html>"))
@patch.object(u, 'html_too_big', Mock())
@patch.object(u.html5parser, 'document_fromstring')
def test_html_document_fromstring_too_big(document_fromstring):
eq_(None, u.html_document_fromstring("<html></html>"))
assert_false(document_fromstring.called)
eq_("no comment", u.html_tree_to_text(d))
@patch.object(u, 'html_fromstring', Mock(return_value=None))
def test_bad_html_to_text():
bad_html = "one<br>two<br>three"
eq_(None, u.html_to_text(bad_html))
@patch.object(u, '_MAX_TAGS_COUNT', 3)
def test_html_too_big():
eq_(False, u.html_too_big("<div></div>"))
eq_(True, u.html_too_big("<div><span>Hi</span></div>"))
@patch.object(u, '_MAX_TAGS_COUNT', 3)
def test_html_to_text():
eq_(b"Hello", u.html_to_text("<div>Hello</div>"))
eq_(None, u.html_to_text("<div><span>Hi</span></div>"))