Compare commits
113 Commits
v1.2.16
...
maxim/deve
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
14f106ee76 | ||
|
|
a8c7e6a972 | ||
|
|
b30c375c5b | ||
|
|
cec5acf58f | ||
|
|
24d0f2d00a | ||
|
|
94007b0b92 | ||
|
|
1a5548f171 | ||
|
|
53c49b9121 | ||
|
|
bd50872043 | ||
|
|
d37c4fd551 | ||
|
|
d9ed7cc6d1 | ||
|
|
0a0808c0a8 | ||
|
|
16354e3528 | ||
|
|
1018e88ec1 | ||
|
|
2916351517 | ||
|
|
46d4b02c81 | ||
|
|
58eac88a10 | ||
|
|
2ef3d8dfbe | ||
|
|
7cf4c29340 | ||
|
|
cdd84563dd | ||
|
|
8138ea9a60 | ||
|
|
c171f9a875 | ||
|
|
3f97a8b8ff | ||
|
|
1147767ff3 | ||
|
|
6a304215c3 | ||
|
|
31714506bd | ||
|
|
403d80cf3b | ||
|
|
7cf20f2877 | ||
|
|
afff08b017 | ||
|
|
685abb1905 | ||
|
|
41990727a3 | ||
|
|
b113d8ab33 | ||
|
|
7bd0e9cc2f | ||
|
|
1e030a51d4 | ||
|
|
238a5de5cc | ||
|
|
53b24ffb3d | ||
|
|
a7404afbcb | ||
|
|
0e6d5f993c | ||
|
|
60637ff13a | ||
|
|
df8259e3fe | ||
|
|
aab3b1cc75 | ||
|
|
9492b39f2d | ||
|
|
b9ac866ea7 | ||
|
|
678517dd89 | ||
|
|
221774c6f8 | ||
|
|
a2aa345712 | ||
|
|
d998beaff3 | ||
|
|
a379bc4e7c | ||
|
|
b8e1894f3b | ||
|
|
0b5a44090f | ||
|
|
b40835eca2 | ||
|
|
b38562c7cc | ||
|
|
70e9fb415e | ||
|
|
64612099cd | ||
|
|
45c20f979d | ||
|
|
743c76f159 | ||
|
|
bc5dad75d3 | ||
|
|
4acf05cf28 | ||
|
|
f5f7264077 | ||
|
|
4364bebf38 | ||
|
|
15e61768f2 | ||
|
|
dd0a0f5c4d | ||
|
|
086f5ba43b | ||
|
|
e16dcf629e | ||
|
|
f16ae5110b | ||
|
|
ab5cbe5ec3 | ||
|
|
be5da92f16 | ||
|
|
95954a65a0 | ||
|
|
0b55e8fa77 | ||
|
|
6f159e8959 | ||
|
|
5c413b4b00 | ||
|
|
cca64d3ed1 | ||
|
|
e11eaf6ff8 | ||
|
|
85a4c1d855 | ||
|
|
0f5e72623b | ||
|
|
061e549ad7 | ||
|
|
49d1a5d248 | ||
|
|
03d6b00db8 | ||
|
|
a2eb0f7201 | ||
|
|
5c71a0ca07 | ||
|
|
489d16fad9 | ||
|
|
a458707777 | ||
|
|
a1d0a86305 | ||
|
|
29f1d21be7 | ||
|
|
34c5b526c3 | ||
|
|
3edb6578ba | ||
|
|
984c036b6e | ||
|
|
a403ecb5c9 | ||
|
|
a44713409c | ||
|
|
567467b8ed | ||
|
|
139edd6104 | ||
|
|
e756d55abf | ||
|
|
015c8d2a78 | ||
|
|
5af846c13d | ||
|
|
e69a9c7a54 | ||
|
|
23cb2a9a53 | ||
|
|
b5e3397b88 | ||
|
|
5685a4055a | ||
|
|
97b72ef767 | ||
|
|
31489848be | ||
|
|
e5988d447b | ||
|
|
adfed748ce | ||
|
|
2444ba87c0 | ||
|
|
534457e713 | ||
|
|
ea82a9730e | ||
|
|
f04b872e14 | ||
|
|
e61894e425 | ||
|
|
35fbdaadac | ||
|
|
8441bc7328 | ||
|
|
37c95ff97b | ||
|
|
5b1ca33c57 | ||
|
|
ec8e09b34e | ||
|
|
bcf97eccfa |
20
.build/Dockerfile
Normal file
20
.build/Dockerfile
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
FROM python:3.9-slim-buster AS deps
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y build-essential git curl python3-dev libatlas3-base libatlas-base-dev liblapack-dev libxml2 libxml2-dev libffi6 libffi-dev musl-dev libxslt-dev
|
||||||
|
|
||||||
|
FROM deps AS testable
|
||||||
|
ARG REPORT_PATH
|
||||||
|
|
||||||
|
VOLUME ["/var/mailgun", "/etc/mailgun/ssl", ${REPORT_PATH}]
|
||||||
|
|
||||||
|
ADD . /app
|
||||||
|
WORKDIR /app
|
||||||
|
COPY wheel/* /wheel/
|
||||||
|
|
||||||
|
RUN mkdir -p ${REPORT_PATH}
|
||||||
|
|
||||||
|
RUN python ./setup.py build bdist_wheel -d /wheel && \
|
||||||
|
pip install --no-deps /wheel/*
|
||||||
|
|
||||||
|
ENTRYPOINT ["/bin/sh", "/app/run_tests.sh"]
|
||||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -39,6 +39,8 @@ nosetests.xml
|
|||||||
/.emacs.desktop
|
/.emacs.desktop
|
||||||
/.emacs.desktop.lock
|
/.emacs.desktop.lock
|
||||||
.elc
|
.elc
|
||||||
|
.idea
|
||||||
|
.cache
|
||||||
auto-save-list
|
auto-save-list
|
||||||
tramp
|
tramp
|
||||||
.\#*
|
.\#*
|
||||||
@@ -52,3 +54,6 @@ _trial_temp
|
|||||||
|
|
||||||
# OSX
|
# OSX
|
||||||
.DS_Store
|
.DS_Store
|
||||||
|
|
||||||
|
# vim-backup
|
||||||
|
*.bak
|
||||||
|
|||||||
@@ -5,3 +5,10 @@ include classifier
|
|||||||
include LICENSE
|
include LICENSE
|
||||||
include MANIFEST.in
|
include MANIFEST.in
|
||||||
include README.rst
|
include README.rst
|
||||||
|
include talon/signature/data/train.data
|
||||||
|
include talon/signature/data/classifier
|
||||||
|
include talon/signature/data/classifier_01.npy
|
||||||
|
include talon/signature/data/classifier_02.npy
|
||||||
|
include talon/signature/data/classifier_03.npy
|
||||||
|
include talon/signature/data/classifier_04.npy
|
||||||
|
include talon/signature/data/classifier_05.npy
|
||||||
|
|||||||
16
README.rst
16
README.rst
@@ -129,6 +129,22 @@ start using it for talon.
|
|||||||
.. _EDRM: http://www.edrm.net/resources/data-sets/edrm-enron-email-data-set
|
.. _EDRM: http://www.edrm.net/resources/data-sets/edrm-enron-email-data-set
|
||||||
.. _forge: https://github.com/mailgun/forge
|
.. _forge: https://github.com/mailgun/forge
|
||||||
|
|
||||||
|
Training on your dataset
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
talon comes with a pre-processed dataset and a pre-trained classifier. To retrain the classifier on your own dataset of raw emails, structure and annotate them in the same way the `forge`_ project does. Then do:
|
||||||
|
|
||||||
|
.. code:: python
|
||||||
|
|
||||||
|
from talon.signature.learning.dataset import build_extraction_dataset
|
||||||
|
from talon.signature.learning import classifier as c
|
||||||
|
|
||||||
|
build_extraction_dataset("/path/to/your/P/folder", "/path/to/talon/signature/data/train.data")
|
||||||
|
c.train(c.init(), "/path/to/talon/signature/data/train.data", "/path/to/talon/signature/data/classifier")
|
||||||
|
|
||||||
|
Note that for signature extraction you need just the folder with the positive samples with annotated signature lines (P folder).
|
||||||
|
|
||||||
|
.. _forge: https://github.com/mailgun/forge
|
||||||
|
|
||||||
Research
|
Research
|
||||||
--------
|
--------
|
||||||
|
|||||||
11
requirements.txt
Normal file
11
requirements.txt
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
chardet>=1.0.1
|
||||||
|
cchardet>=0.3.5
|
||||||
|
cssselect
|
||||||
|
html5lib
|
||||||
|
joblib
|
||||||
|
lxml>=2.3.3
|
||||||
|
numpy
|
||||||
|
regex>=1
|
||||||
|
scikit-learn>=1.0.0
|
||||||
|
scipy
|
||||||
|
six>=1.10.0
|
||||||
4
run_tests.sh
Executable file
4
run_tests.sh
Executable file
@@ -0,0 +1,4 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -ex
|
||||||
|
REPORT_PATH="${REPORT_PATH:-./}"
|
||||||
|
nosetests --with-xunit --with-coverage --cover-xml --cover-xml-file $REPORT_PATH/coverage.xml --xunit-file=$REPORT_PATH/nosetests.xml --cover-package=talon .
|
||||||
30
setup.py
30
setup.py
@@ -19,17 +19,17 @@ class InstallCommand(install):
|
|||||||
if self.no_ml:
|
if self.no_ml:
|
||||||
dist = self.distribution
|
dist = self.distribution
|
||||||
dist.packages=find_packages(exclude=[
|
dist.packages=find_packages(exclude=[
|
||||||
'tests',
|
"tests",
|
||||||
'tests.*',
|
"tests.*",
|
||||||
'talon.signature',
|
"talon.signature",
|
||||||
'talon.signature.*',
|
"talon.signature.*",
|
||||||
])
|
])
|
||||||
for not_required in ['numpy', 'scipy', 'scikit-learn==0.16.1']:
|
for not_required in ["numpy", "scipy", "scikit-learn==0.24.1"]:
|
||||||
dist.install_requires.remove(not_required)
|
dist.install_requires.remove(not_required)
|
||||||
|
|
||||||
|
|
||||||
setup(name='talon',
|
setup(name='talon',
|
||||||
version='1.2.16',
|
version='1.6.0',
|
||||||
description=("Mailgun library "
|
description=("Mailgun library "
|
||||||
"to extract message quotations and signatures."),
|
"to extract message quotations and signatures."),
|
||||||
long_description=open("README.rst").read(),
|
long_description=open("README.rst").read(),
|
||||||
@@ -44,19 +44,21 @@ setup(name='talon',
|
|||||||
include_package_data=True,
|
include_package_data=True,
|
||||||
zip_safe=True,
|
zip_safe=True,
|
||||||
install_requires=[
|
install_requires=[
|
||||||
"lxml>=2.3.3",
|
"lxml",
|
||||||
"regex>=1",
|
"regex",
|
||||||
"numpy",
|
"numpy",
|
||||||
"scipy",
|
"scipy",
|
||||||
"scikit-learn==0.16.1", # pickled versions of classifier, else rebuild
|
"scikit-learn>=1.0.0",
|
||||||
'chardet>=1.0.1',
|
"chardet",
|
||||||
'cchardet>=0.3.5',
|
"cchardet",
|
||||||
'cssselect',
|
"cssselect",
|
||||||
'six>=1.10.0',
|
"six",
|
||||||
|
"html5lib",
|
||||||
|
"joblib",
|
||||||
],
|
],
|
||||||
tests_require=[
|
tests_require=[
|
||||||
"mock",
|
"mock",
|
||||||
"nose>=1.2.1",
|
"nose",
|
||||||
"coverage"
|
"coverage"
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -6,6 +6,7 @@ messages (without quoted messages) from html
|
|||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
|
from talon.utils import cssselect
|
||||||
|
|
||||||
CHECKPOINT_PREFIX = '#!%!'
|
CHECKPOINT_PREFIX = '#!%!'
|
||||||
CHECKPOINT_SUFFIX = '!%!#'
|
CHECKPOINT_SUFFIX = '!%!#'
|
||||||
@@ -78,7 +79,7 @@ def delete_quotation_tags(html_note, counter, quotation_checkpoints):
|
|||||||
|
|
||||||
def cut_gmail_quote(html_message):
|
def cut_gmail_quote(html_message):
|
||||||
''' Cuts the outermost block element with class gmail_quote. '''
|
''' Cuts the outermost block element with class gmail_quote. '''
|
||||||
gmail_quote = html_message.cssselect('div.gmail_quote')
|
gmail_quote = cssselect('div.gmail_quote', html_message)
|
||||||
if gmail_quote and (gmail_quote[0].text is None or not RE_FWD.match(gmail_quote[0].text)):
|
if gmail_quote and (gmail_quote[0].text is None or not RE_FWD.match(gmail_quote[0].text)):
|
||||||
gmail_quote[0].getparent().remove(gmail_quote[0])
|
gmail_quote[0].getparent().remove(gmail_quote[0])
|
||||||
return True
|
return True
|
||||||
@@ -86,17 +87,24 @@ def cut_gmail_quote(html_message):
|
|||||||
|
|
||||||
def cut_microsoft_quote(html_message):
|
def cut_microsoft_quote(html_message):
|
||||||
''' Cuts splitter block and all following blocks. '''
|
''' Cuts splitter block and all following blocks. '''
|
||||||
|
#use EXSLT extensions to have a regex match() function with lxml
|
||||||
|
ns = {"re": "http://exslt.org/regular-expressions"}
|
||||||
|
|
||||||
|
#general pattern: @style='border:none;border-top:solid <color> 1.0pt;padding:3.0pt 0<unit> 0<unit> 0<unit>'
|
||||||
|
#outlook 2007, 2010 (international) <color=#B5C4DF> <unit=cm>
|
||||||
|
#outlook 2007, 2010 (american) <color=#B5C4DF> <unit=pt>
|
||||||
|
#outlook 2013 (international) <color=#E1E1E1> <unit=cm>
|
||||||
|
#outlook 2013 (american) <color=#E1E1E1> <unit=pt>
|
||||||
|
#also handles a variant with a space after the semicolon
|
||||||
splitter = html_message.xpath(
|
splitter = html_message.xpath(
|
||||||
#outlook 2007, 2010 (international)
|
#outlook 2007, 2010, 2013 (international, american)
|
||||||
"//div[@style='border:none;border-top:solid #B5C4DF 1.0pt;"
|
"//div[@style[re:match(., 'border:none; ?border-top:solid #(E1E1E1|B5C4DF) 1.0pt; ?"
|
||||||
"padding:3.0pt 0cm 0cm 0cm']|"
|
"padding:3.0pt 0(in|cm) 0(in|cm) 0(in|cm)')]]|"
|
||||||
#outlook 2007, 2010 (american)
|
|
||||||
"//div[@style='border:none;border-top:solid #B5C4DF 1.0pt;"
|
|
||||||
"padding:3.0pt 0in 0in 0in']|"
|
|
||||||
#windows mail
|
#windows mail
|
||||||
"//div[@style='padding-top: 5px; "
|
"//div[@style='padding-top: 5px; "
|
||||||
"border-top-color: rgb(229, 229, 229); "
|
"border-top-color: rgb(229, 229, 229); "
|
||||||
"border-top-width: 1px; border-top-style: solid;']"
|
"border-top-width: 1px; border-top-style: solid;']"
|
||||||
|
, namespaces=ns
|
||||||
)
|
)
|
||||||
|
|
||||||
if splitter:
|
if splitter:
|
||||||
@@ -135,7 +143,7 @@ def cut_microsoft_quote(html_message):
|
|||||||
def cut_by_id(html_message):
|
def cut_by_id(html_message):
|
||||||
found = False
|
found = False
|
||||||
for quote_id in QUOTE_IDS:
|
for quote_id in QUOTE_IDS:
|
||||||
quote = html_message.cssselect('#{}'.format(quote_id))
|
quote = cssselect('#{}'.format(quote_id), html_message)
|
||||||
if quote:
|
if quote:
|
||||||
found = True
|
found = True
|
||||||
quote[0].getparent().remove(quote[0])
|
quote[0].getparent().remove(quote[0])
|
||||||
|
|||||||
@@ -6,22 +6,22 @@ original messages (without quoted messages)
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
import regex as re
|
|
||||||
import logging
|
import logging
|
||||||
from copy import deepcopy
|
from copy import deepcopy
|
||||||
|
|
||||||
from lxml import html, etree
|
import regex as re
|
||||||
|
from lxml import etree, html
|
||||||
from talon.utils import get_delimiter, html_tree_to_text
|
|
||||||
from talon import html_quotations
|
|
||||||
from six.moves import range
|
from six.moves import range
|
||||||
import six
|
|
||||||
|
|
||||||
|
from talon import html_quotations
|
||||||
|
from talon.utils import (get_delimiter, html_document_fromstring,
|
||||||
|
html_tree_to_text)
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
RE_FWD = re.compile("^[-]+[ ]*Forwarded message[ ]*[-]+$", re.I | re.M)
|
RE_FWD = re.compile("^[-]+[ ]*Forwarded message[ ]*[-]+\s*$", re.I | re.M)
|
||||||
|
|
||||||
RE_ON_DATE_SMB_WROTE = re.compile(
|
RE_ON_DATE_SMB_WROTE = re.compile(
|
||||||
u'(-*[>]?[ ]?({0})[ ].*({1})(.*\n){{0,2}}.*({2}):?-*)'.format(
|
u'(-*[>]?[ ]?({0})[ ].*({1})(.*\n){{0,2}}.*({2}):?-*)'.format(
|
||||||
@@ -37,10 +37,14 @@ RE_ON_DATE_SMB_WROTE = re.compile(
|
|||||||
'Op',
|
'Op',
|
||||||
# German
|
# German
|
||||||
'Am',
|
'Am',
|
||||||
|
# Portuguese
|
||||||
|
'Em',
|
||||||
# Norwegian
|
# Norwegian
|
||||||
u'På',
|
u'På',
|
||||||
# Swedish, Danish
|
# Swedish, Danish
|
||||||
'Den',
|
'Den',
|
||||||
|
# Vietnamese
|
||||||
|
u'Vào',
|
||||||
)),
|
)),
|
||||||
# Date and sender separator
|
# Date and sender separator
|
||||||
u'|'.join((
|
u'|'.join((
|
||||||
@@ -61,8 +65,12 @@ RE_ON_DATE_SMB_WROTE = re.compile(
|
|||||||
'schreef','verzond','geschreven',
|
'schreef','verzond','geschreven',
|
||||||
# German
|
# German
|
||||||
'schrieb',
|
'schrieb',
|
||||||
|
# Portuguese
|
||||||
|
'escreveu',
|
||||||
# Norwegian, Swedish
|
# Norwegian, Swedish
|
||||||
'skrev',
|
'skrev',
|
||||||
|
# Vietnamese
|
||||||
|
u'đã viết',
|
||||||
))
|
))
|
||||||
))
|
))
|
||||||
# Special case for languages where text is translated like this: 'on {date} wrote {somebody}:'
|
# Special case for languages where text is translated like this: 'on {date} wrote {somebody}:'
|
||||||
@@ -85,7 +93,7 @@ RE_ON_DATE_WROTE_SMB = re.compile(
|
|||||||
)
|
)
|
||||||
|
|
||||||
RE_QUOTATION = re.compile(
|
RE_QUOTATION = re.compile(
|
||||||
r'''
|
r"""
|
||||||
(
|
(
|
||||||
# quotation border: splitter line or a number of quotation marker lines
|
# quotation border: splitter line or a number of quotation marker lines
|
||||||
(?:
|
(?:
|
||||||
@@ -103,10 +111,10 @@ RE_QUOTATION = re.compile(
|
|||||||
|
|
||||||
# after quotations should be text only or nothing at all
|
# after quotations should be text only or nothing at all
|
||||||
[te]*$
|
[te]*$
|
||||||
''', re.VERBOSE)
|
""", re.VERBOSE)
|
||||||
|
|
||||||
RE_EMPTY_QUOTATION = re.compile(
|
RE_EMPTY_QUOTATION = re.compile(
|
||||||
r'''
|
r"""
|
||||||
(
|
(
|
||||||
# quotation border: splitter line or a number of quotation marker lines
|
# quotation border: splitter line or a number of quotation marker lines
|
||||||
(?:
|
(?:
|
||||||
@@ -116,7 +124,7 @@ RE_EMPTY_QUOTATION = re.compile(
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
e*
|
e*
|
||||||
''', re.VERBOSE)
|
""", re.VERBOSE)
|
||||||
|
|
||||||
# ------Original Message------ or ---- Reply Message ----
|
# ------Original Message------ or ---- Reply Message ----
|
||||||
# With variations in other languages.
|
# With variations in other languages.
|
||||||
@@ -130,14 +138,33 @@ RE_ORIGINAL_MESSAGE = re.compile(u'[\s]*[-]+[ ]*({})[ ]*[-]+'.format(
|
|||||||
'Oprindelig meddelelse',
|
'Oprindelig meddelelse',
|
||||||
))), re.I)
|
))), re.I)
|
||||||
|
|
||||||
RE_FROM_COLON_OR_DATE_COLON = re.compile(u'(_+\r?\n)?[\s]*(:?[*]?{})[\s]?:[*]? .*'.format(
|
RE_FROM_COLON_OR_DATE_COLON = re.compile(u'((_+\r?\n)?[\s]*:?[*]?({})[\s]?:([^\n$]+\n){{1,2}}){{2,}}'.format(
|
||||||
u'|'.join((
|
u'|'.join((
|
||||||
# "From" in different languages.
|
# "From" in different languages.
|
||||||
'From', 'Van', 'De', 'Von', 'Fra', u'Från',
|
'From', 'Van', 'De', 'Von', 'Fra', u'Från',
|
||||||
# "Date" in different languages.
|
# "Date" in different languages.
|
||||||
'Date', 'Datum', u'Envoyé', 'Skickat', 'Sendt',
|
'Date', '[S]ent', 'Datum', u'Envoyé', 'Skickat', 'Sendt', 'Gesendet',
|
||||||
|
# "Subject" in different languages.
|
||||||
|
'Subject', 'Betreff', 'Objet', 'Emne', u'Ämne',
|
||||||
|
# "To" in different languages.
|
||||||
|
'To', 'An', 'Til', u'À', 'Till'
|
||||||
|
))), re.I | re.M)
|
||||||
|
|
||||||
|
# ---- John Smith wrote ----
|
||||||
|
RE_ANDROID_WROTE = re.compile(u'[\s]*[-]+.*({})[ ]*[-]+'.format(
|
||||||
|
u'|'.join((
|
||||||
|
# English
|
||||||
|
'wrote',
|
||||||
))), re.I)
|
))), re.I)
|
||||||
|
|
||||||
|
# Support polymail.io reply format
|
||||||
|
# On Tue, Apr 11, 2017 at 10:07 PM John Smith
|
||||||
|
#
|
||||||
|
# <
|
||||||
|
# mailto:John Smith <johnsmith@gmail.com>
|
||||||
|
# > wrote:
|
||||||
|
RE_POLYMAIL = re.compile('On.*\s{2}<\smailto:.*\s> wrote:', re.I)
|
||||||
|
|
||||||
SPLITTER_PATTERNS = [
|
SPLITTER_PATTERNS = [
|
||||||
RE_ORIGINAL_MESSAGE,
|
RE_ORIGINAL_MESSAGE,
|
||||||
RE_ON_DATE_SMB_WROTE,
|
RE_ON_DATE_SMB_WROTE,
|
||||||
@@ -145,32 +172,33 @@ SPLITTER_PATTERNS = [
|
|||||||
RE_FROM_COLON_OR_DATE_COLON,
|
RE_FROM_COLON_OR_DATE_COLON,
|
||||||
# 02.04.2012 14:20 пользователь "bob@example.com" <
|
# 02.04.2012 14:20 пользователь "bob@example.com" <
|
||||||
# bob@xxx.mailgun.org> написал:
|
# bob@xxx.mailgun.org> написал:
|
||||||
re.compile("(\d+/\d+/\d+|\d+\.\d+\.\d+).*@", re.S),
|
re.compile("(\d+/\d+/\d+|\d+\.\d+\.\d+).*\s\S+@\S+", re.S),
|
||||||
# 2014-10-17 11:28 GMT+03:00 Bob <
|
# 2014-10-17 11:28 GMT+03:00 Bob <
|
||||||
# bob@example.com>:
|
# bob@example.com>:
|
||||||
re.compile("\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}\s+GMT.*@", re.S),
|
re.compile("\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}\s+GMT.*\s\S+@\S+", re.S),
|
||||||
# Thu, 26 Jun 2014 14:00:51 +0400 Bob <bob@example.com>:
|
# Thu, 26 Jun 2014 14:00:51 +0400 Bob <bob@example.com>:
|
||||||
re.compile('\S{3,10}, \d\d? \S{3,10} 20\d\d,? \d\d?:\d\d(:\d\d)?'
|
re.compile('\S{3,10}, \d\d? \S{3,10} 20\d\d,? \d\d?:\d\d(:\d\d)?'
|
||||||
'( \S+){3,6}@\S+:'),
|
'( \S+){3,6}@\S+:'),
|
||||||
# Sent from Samsung MobileName <address@example.com> wrote:
|
# Sent from Samsung MobileName <address@example.com> wrote:
|
||||||
re.compile('Sent from Samsung .*@.*> wrote')
|
re.compile('Sent from Samsung.* \S+@\S+> wrote'),
|
||||||
|
RE_ANDROID_WROTE,
|
||||||
|
RE_POLYMAIL
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
RE_LINK = re.compile('<(http://[^>]*)>')
|
RE_LINK = re.compile('<(http://[^>]*)>')
|
||||||
RE_NORMALIZED_LINK = re.compile('@@(http://[^>@]*)@@')
|
RE_NORMALIZED_LINK = re.compile('@@(http://[^>@]*)@@')
|
||||||
|
|
||||||
RE_PARENTHESIS_LINK = re.compile("\(https?://")
|
RE_PARENTHESIS_LINK = re.compile("\(https?://")
|
||||||
|
|
||||||
SPLITTER_MAX_LINES = 4
|
SPLITTER_MAX_LINES = 6
|
||||||
MAX_LINES_COUNT = 1000
|
MAX_LINES_COUNT = 1000
|
||||||
# an extensive research shows that exceeding this limit
|
|
||||||
# leads to excessive processing time
|
|
||||||
MAX_HTML_LEN = 2794202
|
|
||||||
|
|
||||||
QUOT_PATTERN = re.compile('^>+ ?')
|
QUOT_PATTERN = re.compile('^>+ ?')
|
||||||
NO_QUOT_LINE = re.compile('^[^>].*[\S].*')
|
NO_QUOT_LINE = re.compile('^[^>].*[\S].*')
|
||||||
|
|
||||||
|
# Regular expression to identify if a line is a header.
|
||||||
|
RE_HEADER = re.compile(": ")
|
||||||
|
|
||||||
|
|
||||||
def extract_from(msg_body, content_type='text/plain'):
|
def extract_from(msg_body, content_type='text/plain'):
|
||||||
try:
|
try:
|
||||||
@@ -184,6 +212,19 @@ def extract_from(msg_body, content_type='text/plain'):
|
|||||||
return msg_body
|
return msg_body
|
||||||
|
|
||||||
|
|
||||||
|
def remove_initial_spaces_and_mark_message_lines(lines):
|
||||||
|
"""
|
||||||
|
Removes the initial spaces in each line before marking message lines.
|
||||||
|
|
||||||
|
This ensures headers can be identified if they are indented with spaces.
|
||||||
|
"""
|
||||||
|
i = 0
|
||||||
|
while i < len(lines):
|
||||||
|
lines[i] = lines[i].lstrip(' ')
|
||||||
|
i += 1
|
||||||
|
return mark_message_lines(lines)
|
||||||
|
|
||||||
|
|
||||||
def mark_message_lines(lines):
|
def mark_message_lines(lines):
|
||||||
"""Mark message lines with markers to distinguish quotation lines.
|
"""Mark message lines with markers to distinguish quotation lines.
|
||||||
|
|
||||||
@@ -249,7 +290,7 @@ def process_marked_lines(lines, markers, return_flags=[False, -1, -1]):
|
|||||||
# inlined reply
|
# inlined reply
|
||||||
# use lookbehind assertions to find overlapping entries e.g. for 'mtmtm'
|
# use lookbehind assertions to find overlapping entries e.g. for 'mtmtm'
|
||||||
# both 't' entries should be found
|
# both 't' entries should be found
|
||||||
for inline_reply in re.finditer('(?<=m)e*((?:t+e*)+)m', markers):
|
for inline_reply in re.finditer('(?<=m)e*(t[te]*)m', markers):
|
||||||
# long links could break sequence of quotation lines but they shouldn't
|
# long links could break sequence of quotation lines but they shouldn't
|
||||||
# be considered an inline reply
|
# be considered an inline reply
|
||||||
links = (
|
links = (
|
||||||
@@ -286,12 +327,21 @@ def preprocess(msg_body, delimiter, content_type='text/plain'):
|
|||||||
|
|
||||||
Converts msg_body into a unicode.
|
Converts msg_body into a unicode.
|
||||||
"""
|
"""
|
||||||
# normalize links i.e. replace '<', '>' wrapping the link with some symbols
|
msg_body = _replace_link_brackets(msg_body)
|
||||||
# so that '>' closing the link couldn't be mistakenly taken for quotation
|
|
||||||
# marker.
|
|
||||||
if isinstance(msg_body, bytes):
|
|
||||||
msg_body = msg_body.decode('utf8')
|
|
||||||
|
|
||||||
|
msg_body = _wrap_splitter_with_newline(msg_body, delimiter, content_type)
|
||||||
|
|
||||||
|
return msg_body
|
||||||
|
|
||||||
|
|
||||||
|
def _replace_link_brackets(msg_body):
|
||||||
|
"""
|
||||||
|
Normalize links i.e. replace '<', '>' wrapping the link with some symbols
|
||||||
|
so that '>' closing the link couldn't be mistakenly taken for quotation
|
||||||
|
marker.
|
||||||
|
|
||||||
|
Converts msg_body into a unicode
|
||||||
|
"""
|
||||||
def link_wrapper(link):
|
def link_wrapper(link):
|
||||||
newline_index = msg_body[:link.start()].rfind("\n")
|
newline_index = msg_body[:link.start()].rfind("\n")
|
||||||
if msg_body[newline_index + 1] == ">":
|
if msg_body[newline_index + 1] == ">":
|
||||||
@@ -300,7 +350,14 @@ def preprocess(msg_body, delimiter, content_type='text/plain'):
|
|||||||
return "@@%s@@" % link.group(1)
|
return "@@%s@@" % link.group(1)
|
||||||
|
|
||||||
msg_body = re.sub(RE_LINK, link_wrapper, msg_body)
|
msg_body = re.sub(RE_LINK, link_wrapper, msg_body)
|
||||||
|
return msg_body
|
||||||
|
|
||||||
|
|
||||||
|
def _wrap_splitter_with_newline(msg_body, delimiter, content_type='text/plain'):
|
||||||
|
"""
|
||||||
|
Splits line in two if splitter pattern preceded by some text on the same
|
||||||
|
line (done only for 'On <date> <person> wrote:' pattern.
|
||||||
|
"""
|
||||||
def splitter_wrapper(splitter):
|
def splitter_wrapper(splitter):
|
||||||
"""Wraps splitter with new line"""
|
"""Wraps splitter with new line"""
|
||||||
if splitter.start() and msg_body[splitter.start() - 1] != '\n':
|
if splitter.start() and msg_body[splitter.start() - 1] != '\n':
|
||||||
@@ -324,8 +381,6 @@ def postprocess(msg_body):
|
|||||||
|
|
||||||
def extract_from_plain(msg_body):
|
def extract_from_plain(msg_body):
|
||||||
"""Extracts a non quoted message from provided plain text."""
|
"""Extracts a non quoted message from provided plain text."""
|
||||||
stripped_text = msg_body
|
|
||||||
|
|
||||||
delimiter = get_delimiter(msg_body)
|
delimiter = get_delimiter(msg_body)
|
||||||
msg_body = preprocess(msg_body, delimiter)
|
msg_body = preprocess(msg_body, delimiter)
|
||||||
# don't process too long messages
|
# don't process too long messages
|
||||||
@@ -357,22 +412,27 @@ def extract_from_html(msg_body):
|
|||||||
|
|
||||||
Returns a unicode string.
|
Returns a unicode string.
|
||||||
"""
|
"""
|
||||||
if isinstance(msg_body, six.text_type):
|
if msg_body.strip() == "":
|
||||||
msg_body = msg_body.encode('utf8')
|
return msg_body
|
||||||
elif not isinstance(msg_body, bytes):
|
|
||||||
msg_body = msg_body.encode('ascii')
|
|
||||||
|
|
||||||
result = _extract_from_html(msg_body)
|
msg_body = msg_body.replace("\r\n", "\n")
|
||||||
if isinstance(result, bytes):
|
# Cut out xml and doctype tags to avoid conflict with unicode decoding.
|
||||||
result = result.decode('utf8')
|
msg_body = re.sub(r"\<\?xml.+\?\>|\<\!DOCTYPE.+]\>", "", msg_body)
|
||||||
|
html_tree = html_document_fromstring(msg_body)
|
||||||
|
if html_tree is None:
|
||||||
|
return msg_body
|
||||||
|
|
||||||
|
result = extract_from_html_tree(html_tree)
|
||||||
|
if not result:
|
||||||
|
return msg_body
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def _extract_from_html(msg_body):
|
def extract_from_html_tree(html_tree):
|
||||||
"""
|
"""
|
||||||
Extract not quoted message from provided html message body
|
Extract not quoted message from provided parsed html tree using tags and
|
||||||
using tags and plain text algorithm.
|
plain text algorithm.
|
||||||
|
|
||||||
Cut out the 'blockquote', 'gmail_quote' tags.
|
Cut out the 'blockquote', 'gmail_quote' tags.
|
||||||
Cut Microsoft quotations.
|
Cut Microsoft quotations.
|
||||||
@@ -385,17 +445,6 @@ def _extract_from_html(msg_body):
|
|||||||
then checking deleted checkpoints,
|
then checking deleted checkpoints,
|
||||||
then deleting necessary tags.
|
then deleting necessary tags.
|
||||||
"""
|
"""
|
||||||
if len(msg_body) > MAX_HTML_LEN:
|
|
||||||
return msg_body
|
|
||||||
|
|
||||||
if msg_body.strip() == b'':
|
|
||||||
return msg_body
|
|
||||||
|
|
||||||
msg_body = msg_body.replace(b'\r\n', b'\n')
|
|
||||||
html_tree = html.document_fromstring(
|
|
||||||
msg_body,
|
|
||||||
parser=html.HTMLParser(encoding="utf-8")
|
|
||||||
)
|
|
||||||
cut_quotations = (html_quotations.cut_gmail_quote(html_tree) or
|
cut_quotations = (html_quotations.cut_gmail_quote(html_tree) or
|
||||||
html_quotations.cut_zimbra_quote(html_tree) or
|
html_quotations.cut_zimbra_quote(html_tree) or
|
||||||
html_quotations.cut_blockquote(html_tree) or
|
html_quotations.cut_blockquote(html_tree) or
|
||||||
@@ -413,7 +462,7 @@ def _extract_from_html(msg_body):
|
|||||||
|
|
||||||
# Don't process too long messages
|
# Don't process too long messages
|
||||||
if len(lines) > MAX_LINES_COUNT:
|
if len(lines) > MAX_LINES_COUNT:
|
||||||
return msg_body
|
return None
|
||||||
|
|
||||||
# Collect checkpoints on each line
|
# Collect checkpoints on each line
|
||||||
line_checkpoints = [
|
line_checkpoints = [
|
||||||
@@ -432,7 +481,7 @@ def _extract_from_html(msg_body):
|
|||||||
lines_were_deleted, first_deleted, last_deleted = return_flags
|
lines_were_deleted, first_deleted, last_deleted = return_flags
|
||||||
|
|
||||||
if not lines_were_deleted and not cut_quotations:
|
if not lines_were_deleted and not cut_quotations:
|
||||||
return msg_body
|
return None
|
||||||
|
|
||||||
if lines_were_deleted:
|
if lines_were_deleted:
|
||||||
#collect checkpoints from deleted lines
|
#collect checkpoints from deleted lines
|
||||||
@@ -446,9 +495,148 @@ def _extract_from_html(msg_body):
|
|||||||
)
|
)
|
||||||
|
|
||||||
if _readable_text_empty(html_tree_copy):
|
if _readable_text_empty(html_tree_copy):
|
||||||
return msg_body
|
return None
|
||||||
|
|
||||||
return html.tostring(html_tree_copy)
|
# NOTE: We remove_namespaces() because we are using an HTML5 Parser, HTML
|
||||||
|
# parsers do not recognize namespaces in HTML tags. As such the rendered
|
||||||
|
# HTML tags are no longer recognizable HTML tags. Example: <o:p> becomes
|
||||||
|
# <oU0003Ap>. When we port this to golang we should look into using an
|
||||||
|
# XML Parser NOT and HTML5 Parser since we do not know what input a
|
||||||
|
# customer will send us. Switching to a common XML parser in python
|
||||||
|
# opens us up to a host of vulnerabilities.
|
||||||
|
# See https://docs.python.org/3/library/xml.html#xml-vulnerabilities
|
||||||
|
#
|
||||||
|
# The down sides to removing the namespaces is that customers might
|
||||||
|
# judge the XML namespaces important. If that is the case then support
|
||||||
|
# should encourage customers to preform XML parsing of the un-stripped
|
||||||
|
# body to get the full unmodified XML payload.
|
||||||
|
#
|
||||||
|
# Alternatives to this approach are
|
||||||
|
# 1. Ignore the U0003A in tag names and let the customer deal with it.
|
||||||
|
# This is not ideal, as most customers use stripped-html for viewing
|
||||||
|
# emails sent from a recipient, as such they cannot control the HTML
|
||||||
|
# provided by a recipient.
|
||||||
|
# 2. Preform a string replace of 'U0003A' to ':' on the rendered HTML
|
||||||
|
# string. While this would solve the issue simply, it runs the risk
|
||||||
|
# of replacing data outside the <tag> which might be essential to
|
||||||
|
# the customer.
|
||||||
|
remove_namespaces(html_tree_copy)
|
||||||
|
s = html.tostring(html_tree_copy, encoding="ascii")
|
||||||
|
if not s:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return s.decode("ascii")
|
||||||
|
|
||||||
|
|
||||||
|
def remove_namespaces(root):
|
||||||
|
"""
|
||||||
|
Given the root of an HTML document iterate through all the elements
|
||||||
|
and remove any namespaces that might have been provided and remove
|
||||||
|
any attributes that contain a namespace
|
||||||
|
|
||||||
|
<html xmlns:o="urn:schemas-microsoft-com:office:office">
|
||||||
|
becomes
|
||||||
|
<html>
|
||||||
|
|
||||||
|
<o:p>Hi</o:p>
|
||||||
|
becomes
|
||||||
|
<p>Hi</p>
|
||||||
|
|
||||||
|
Start tags do NOT have a namespace; COLON characters have no special meaning.
|
||||||
|
if we don't remove the namespace the parser translates the tag name into a
|
||||||
|
unicode representation. For example <o:p> becomes <oU0003Ap>
|
||||||
|
|
||||||
|
See https://www.w3.org/TR/2011/WD-html5-20110525/syntax.html#start-tags
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
for child in root.iter():
|
||||||
|
for key, value in child.attrib.items():
|
||||||
|
# If the attribute includes a colon
|
||||||
|
if key.rfind("U0003A") != -1:
|
||||||
|
child.attrib.pop(key)
|
||||||
|
|
||||||
|
# If the tag includes a colon
|
||||||
|
idx = child.tag.rfind("U0003A")
|
||||||
|
if idx != -1:
|
||||||
|
child.tag = child.tag[idx+6:]
|
||||||
|
|
||||||
|
return root
|
||||||
|
|
||||||
|
|
||||||
|
def split_emails(msg):
|
||||||
|
"""
|
||||||
|
Given a message (which may consist of an email conversation thread with
|
||||||
|
multiple emails), mark the lines to identify split lines, content lines and
|
||||||
|
empty lines.
|
||||||
|
|
||||||
|
Correct the split line markers inside header blocks. Header blocks are
|
||||||
|
identified by the regular expression RE_HEADER.
|
||||||
|
|
||||||
|
Return the corrected markers
|
||||||
|
"""
|
||||||
|
msg_body = _replace_link_brackets(msg)
|
||||||
|
|
||||||
|
# don't process too long messages
|
||||||
|
lines = msg_body.splitlines()[:MAX_LINES_COUNT]
|
||||||
|
markers = remove_initial_spaces_and_mark_message_lines(lines)
|
||||||
|
|
||||||
|
markers = _mark_quoted_email_splitlines(markers, lines)
|
||||||
|
|
||||||
|
# we don't want splitlines in header blocks
|
||||||
|
markers = _correct_splitlines_in_headers(markers, lines)
|
||||||
|
|
||||||
|
return markers
|
||||||
|
|
||||||
|
|
||||||
|
def _mark_quoted_email_splitlines(markers, lines):
    """Upgrade 'm' markers to 's' for quoted splitline headers.

    Headers indented with '>' characters may actually be splitline
    headers.  Any line currently marked 'm' that matches one of the
    SPLITTER_PATTERNS is re-marked 's'; the updated marker string is
    returned.
    """
    result = list(markers)

    for idx, text in enumerate(lines):
        if result[idx] != 'm':
            continue
        # any() short-circuits on the first matching pattern, just like
        # the equivalent loop-with-break.
        if any(re.search(pattern, text) for pattern in SPLITTER_PATTERNS):
            result[idx] = 's'

    return "".join(result)
|
||||||
|
|
||||||
|
|
||||||
|
def _correct_splitlines_in_headers(markers, lines):
    """Remove split markers deemed to be inside header blocks.

    A header block is entered at an 's' marker whose line matches
    RE_HEADER.  While inside a block, subsequent 's' markers are
    downgraded to 'm' (if the line is quoted, per QUOT_PATTERN) or 't'
    (otherwise).  The block ends at the first line that no longer
    matches RE_HEADER.

    Returns the corrected marker string.
    """
    updated = []
    in_header_block = False

    for i, marker in enumerate(markers):
        # Evaluate the header regex once per line; the original searched
        # twice for lines marked 's'.
        is_header = bool(re.search(RE_HEADER, lines[i]))

        # Only set in_header_block when we hit an 's' and the line is a
        # header.
        if marker == 's':
            if not in_header_block:
                if is_header:
                    in_header_block = True
            elif QUOT_PATTERN.match(lines[i]):
                marker = 'm'
            else:
                marker = 't'

        # If the line is not a header line, the header block is over.
        if not is_header:
            in_header_block = False

        updated.append(marker)

    # Joining a list avoids the quadratic cost of repeated string +=.
    return "".join(updated)
|
||||||
|
|
||||||
|
|
||||||
def _readable_text_empty(html_tree):
|
def _readable_text_empty(html_tree):
|
||||||
@@ -456,10 +644,10 @@ def _readable_text_empty(html_tree):
|
|||||||
|
|
||||||
|
|
||||||
def is_splitter(line):
|
def is_splitter(line):
|
||||||
'''
|
"""
|
||||||
Returns Matcher object if provided string is a splitter and
|
Returns Matcher object if provided string is a splitter and
|
||||||
None otherwise.
|
None otherwise.
|
||||||
'''
|
"""
|
||||||
for pattern in SPLITTER_PATTERNS:
|
for pattern in SPLITTER_PATTERNS:
|
||||||
matcher = re.match(pattern, line)
|
matcher = re.match(pattern, line)
|
||||||
if matcher:
|
if matcher:
|
||||||
@@ -467,12 +655,12 @@ def is_splitter(line):
|
|||||||
|
|
||||||
|
|
||||||
def text_content(context):
    """XPath extension function returning a node's text content, stripped."""
    node = context.context_node
    return node.xpath("string()").strip()
|
||||||
|
|
||||||
|
|
||||||
def tail(context):
    """XPath extension function returning a node's tail text ('' when absent)."""
    node_tail = context.context_node.tail
    # lxml tail is either a string or None; map None (and '') to ''.
    return node_tail if node_tail else ''
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -23,17 +23,14 @@ trained against, don't forget to regenerate:
|
|||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from . import extraction
|
from talon.signature import extraction
|
||||||
from . extraction import extract #noqa
|
from talon.signature.extraction import extract
|
||||||
from . learning import classifier
|
from talon.signature.learning import classifier
|
||||||
|
|
||||||
|
|
||||||
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
|
|
||||||
|
|
||||||
EXTRACTOR_FILENAME = os.path.join(DATA_DIR, 'classifier')
|
|
||||||
EXTRACTOR_DATA = os.path.join(DATA_DIR, 'train.data')
|
|
||||||
|
|
||||||
|
|
||||||
def initialize():
    """Load the trained signature classifier into extraction.EXTRACTOR."""
    base = os.path.dirname(__file__)
    classifier_path = os.path.join(base, 'data', 'classifier')
    train_data_path = os.path.join(base, 'data', 'train.data')
    extraction.EXTRACTOR = classifier.load(classifier_path, train_data_path)
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
from talon.utils import get_delimiter
|
|
||||||
from talon.signature.constants import (SIGNATURE_MAX_LINES,
|
from talon.signature.constants import (SIGNATURE_MAX_LINES,
|
||||||
TOO_LONG_SIGNATURE_LINE)
|
TOO_LONG_SIGNATURE_LINE)
|
||||||
|
from talon.utils import get_delimiter
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
# regex to fetch signature based on common signature words
|
# regex to fetch signature based on common signature words
|
||||||
RE_SIGNATURE = re.compile(r'''
|
RE_SIGNATURE = re.compile(r'''
|
||||||
(
|
(
|
||||||
@@ -28,7 +28,6 @@ RE_SIGNATURE = re.compile(r'''
|
|||||||
)
|
)
|
||||||
''', re.I | re.X | re.M | re.S)
|
''', re.I | re.X | re.M | re.S)
|
||||||
|
|
||||||
|
|
||||||
# signatures appended by phone email clients
|
# signatures appended by phone email clients
|
||||||
RE_PHONE_SIGNATURE = re.compile(r'''
|
RE_PHONE_SIGNATURE = re.compile(r'''
|
||||||
(
|
(
|
||||||
@@ -45,7 +44,6 @@ RE_PHONE_SIGNATURE = re.compile(r'''
|
|||||||
)
|
)
|
||||||
''', re.I | re.X | re.M | re.S)
|
''', re.I | re.X | re.M | re.S)
|
||||||
|
|
||||||
|
|
||||||
# see _mark_candidate_indexes() for details
|
# see _mark_candidate_indexes() for details
|
||||||
# c - could be signature line
|
# c - could be signature line
|
||||||
# d - line starts with dashes (could be signature or list item)
|
# d - line starts with dashes (could be signature or list item)
|
||||||
@@ -112,7 +110,7 @@ def extract_signature(msg_body):
|
|||||||
|
|
||||||
return (stripped_body.strip(),
|
return (stripped_body.strip(),
|
||||||
signature.strip())
|
signature.strip())
|
||||||
except Exception as e:
|
except Exception:
|
||||||
log.exception('ERROR extracting signature')
|
log.exception('ERROR extracting signature')
|
||||||
return (msg_body, None)
|
return (msg_body, None)
|
||||||
|
|
||||||
@@ -163,7 +161,7 @@ def _mark_candidate_indexes(lines, candidate):
|
|||||||
'cdc'
|
'cdc'
|
||||||
"""
|
"""
|
||||||
# at first consider everything to be potential signature lines
|
# at first consider everything to be potential signature lines
|
||||||
markers = bytearray('c'*len(candidate))
|
markers = list('c' * len(candidate))
|
||||||
|
|
||||||
# mark lines starting from bottom up
|
# mark lines starting from bottom up
|
||||||
for i, line_idx in reversed(list(enumerate(candidate))):
|
for i, line_idx in reversed(list(enumerate(candidate))):
|
||||||
@@ -174,7 +172,7 @@ def _mark_candidate_indexes(lines, candidate):
|
|||||||
if line.startswith('-') and line.strip("-"):
|
if line.startswith('-') and line.strip("-"):
|
||||||
markers[i] = 'd'
|
markers[i] = 'd'
|
||||||
|
|
||||||
return markers
|
return "".join(markers)
|
||||||
|
|
||||||
|
|
||||||
def _process_marked_candidate_indexes(candidate, markers):
|
def _process_marked_candidate_indexes(candidate, markers):
|
||||||
|
|||||||
1
talon/signature/data/__init__.py
Normal file
1
talon/signature/data/__init__.py
Normal file
@@ -0,0 +1 @@
|
|||||||
|
|
||||||
Binary file not shown.
File diff suppressed because it is too large
Load Diff
@@ -1,16 +1,15 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
import regex as re
|
|
||||||
import numpy
|
import numpy
|
||||||
|
import regex as re
|
||||||
from talon.signature.learning.featurespace import features, build_pattern
|
|
||||||
from talon.utils import get_delimiter
|
|
||||||
from talon.signature.bruteforce import get_signature_candidate
|
from talon.signature.bruteforce import get_signature_candidate
|
||||||
|
from talon.signature.learning.featurespace import features, build_pattern
|
||||||
from talon.signature.learning.helpers import has_signature
|
from talon.signature.learning.helpers import has_signature
|
||||||
|
from talon.utils import get_delimiter
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -33,7 +32,7 @@ RE_REVERSE_SIGNATURE = re.compile(r'''
|
|||||||
|
|
||||||
def is_signature_line(line, sender, classifier):
    '''Checks if the line belongs to signature. Returns True or False.'''
    pattern = build_pattern(line, features(sender))
    # sklearn estimators expect a 2-D array holding a single sample.
    sample = numpy.array(pattern).reshape(1, -1)
    return classifier.predict(sample) > 0
|
||||||
|
|
||||||
|
|
||||||
@@ -58,7 +57,7 @@ def extract(body, sender):
|
|||||||
text = delimiter.join(text)
|
text = delimiter.join(text)
|
||||||
if text.strip():
|
if text.strip():
|
||||||
return (text, delimiter.join(signature))
|
return (text, delimiter.join(signature))
|
||||||
except Exception:
|
except Exception as e:
|
||||||
log.exception('ERROR when extracting signature with classifiers')
|
log.exception('ERROR when extracting signature with classifiers')
|
||||||
|
|
||||||
return (body, None)
|
return (body, None)
|
||||||
@@ -81,7 +80,7 @@ def _mark_lines(lines, sender):
|
|||||||
candidate = get_signature_candidate(lines)
|
candidate = get_signature_candidate(lines)
|
||||||
|
|
||||||
# at first consider everything to be text no signature
|
# at first consider everything to be text no signature
|
||||||
markers = bytearray('t'*len(lines))
|
markers = list('t' * len(lines))
|
||||||
|
|
||||||
# mark lines starting from bottom up
|
# mark lines starting from bottom up
|
||||||
# mark only lines that belong to candidate
|
# mark only lines that belong to candidate
|
||||||
@@ -96,7 +95,7 @@ def _mark_lines(lines, sender):
|
|||||||
elif is_signature_line(line, sender, EXTRACTOR):
|
elif is_signature_line(line, sender, EXTRACTOR):
|
||||||
markers[j] = 's'
|
markers[j] = 's'
|
||||||
|
|
||||||
return markers
|
return "".join(markers)
|
||||||
|
|
||||||
|
|
||||||
def _process_marked_lines(lines, markers):
|
def _process_marked_lines(lines, markers):
|
||||||
@@ -111,3 +110,4 @@ def _process_marked_lines(lines, markers):
|
|||||||
return (lines[:-signature.end()], lines[-signature.end():])
|
return (lines[:-signature.end()], lines[-signature.end():])
|
||||||
|
|
||||||
return (lines, None)
|
return (lines, None)
|
||||||
|
|
||||||
|
|||||||
@@ -6,9 +6,10 @@ body belongs to the signature.
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
|
|
||||||
from numpy import genfromtxt
|
from numpy import genfromtxt
|
||||||
|
import joblib
|
||||||
from sklearn.svm import LinearSVC
|
from sklearn.svm import LinearSVC
|
||||||
from sklearn.externals import joblib
|
|
||||||
|
|
||||||
|
|
||||||
def init():
|
def init():
|
||||||
@@ -29,4 +30,40 @@ def train(classifier, train_data_filename, save_classifier_filename=None):
|
|||||||
|
|
||||||
def load(saved_classifier_filename, train_data_filename):
    """Loads saved classifier. """
    try:
        return joblib.load(saved_classifier_filename)
    except Exception:
        import sys

        # On Python 3 a Python-2 pickle may still be loadable via the
        # encoding-compatibility path; on Python 2 just re-raise.
        if sys.version_info <= (3, 0):
            raise
        return load_compat(saved_classifier_filename)
|
||||||
|
|
||||||
|
|
||||||
|
def load_compat(saved_classifier_filename):
    """Reload a Python-2 era classifier pickle under Python 3.

    Re-pickles the classifier with a latin1-compatible encoding and then
    loads it through joblib.  If the original file is not writable, a
    temporary file is used instead.

    Fixes over the previous version: the pickle file handle is now closed
    (context manager) and the working directory is always restored even
    if loading raises (try/finally).
    """
    import os
    import pickle
    import tempfile

    # we need to switch to the data path to properly load the related
    # _xx.npy files (joblib resolves them relative to the cwd)
    cwd = os.getcwd()
    os.chdir(os.path.dirname(saved_classifier_filename))
    try:
        # convert encoding using pickle.load and write to a file which
        # we'll tell joblib to use
        with open(saved_classifier_filename, 'rb') as pickle_file:
            classifier = pickle.load(pickle_file, encoding='latin1')

        try:
            # save our conversion if permissions allow
            joblib.dump(classifier, saved_classifier_filename)
        except Exception:
            # can't write to classifier, use a temp file
            tmp = tempfile.SpooledTemporaryFile()
            joblib.dump(classifier, tmp)
            saved_classifier_filename = tmp

        # important, use joblib.load before switching back to original cwd
        jb_classifier = joblib.load(saved_classifier_filename)
    finally:
        os.chdir(cwd)

    return jb_classifier
|
||||||
|
|||||||
@@ -17,13 +17,14 @@ suffix which should be `_sender`.
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
import regex as re
|
import regex as re
|
||||||
|
from six.moves import range
|
||||||
|
|
||||||
from talon.signature.constants import SIGNATURE_MAX_LINES
|
from talon.signature.constants import SIGNATURE_MAX_LINES
|
||||||
from talon.signature.learning.featurespace import build_pattern, features
|
from talon.signature.learning.featurespace import build_pattern, features
|
||||||
from six.moves import range
|
|
||||||
|
|
||||||
|
|
||||||
SENDER_SUFFIX = '_sender'
|
SENDER_SUFFIX = '_sender'
|
||||||
BODY_SUFFIX = '_body'
|
BODY_SUFFIX = '_body'
|
||||||
@@ -57,9 +58,14 @@ def parse_msg_sender(filename, sender_known=True):
|
|||||||
algorithm:
|
algorithm:
|
||||||
>>> parse_msg_sender(filename, False)
|
>>> parse_msg_sender(filename, False)
|
||||||
"""
|
"""
|
||||||
|
import sys
|
||||||
|
kwargs = {}
|
||||||
|
if sys.version_info > (3, 0):
|
||||||
|
kwargs["encoding"] = "utf8"
|
||||||
|
|
||||||
sender, msg = None, None
|
sender, msg = None, None
|
||||||
if os.path.isfile(filename) and not is_sender_filename(filename):
|
if os.path.isfile(filename) and not is_sender_filename(filename):
|
||||||
with open(filename) as f:
|
with open(filename, **kwargs) as f:
|
||||||
msg = f.read()
|
msg = f.read()
|
||||||
sender = u''
|
sender = u''
|
||||||
if sender_known:
|
if sender_known:
|
||||||
|
|||||||
@@ -5,21 +5,17 @@
|
|||||||
* regexp's constants used when evaluating signature's features
|
* regexp's constants used when evaluating signature's features
|
||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import absolute_import
|
|
||||||
import unicodedata
|
import unicodedata
|
||||||
|
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
from talon.utils import to_unicode
|
|
||||||
|
|
||||||
from talon.signature.constants import SIGNATURE_MAX_LINES
|
from talon.signature.constants import SIGNATURE_MAX_LINES
|
||||||
|
|
||||||
|
|
||||||
rc = re.compile
|
rc = re.compile
|
||||||
|
|
||||||
RE_EMAIL = rc('\S@\S')
|
RE_EMAIL = rc('\S@\S')
|
||||||
RE_RELAX_PHONE = rc('(\(? ?[\d]{2,3} ?\)?.{,3}?){2,}')
|
RE_RELAX_PHONE = rc('(\(? ?[\d]{2,3} ?\)?.{,3}?){2,}')
|
||||||
RE_URL = rc(r'''https?://|www\.[\S]+\.[\S]''')
|
RE_URL = rc(r"""https?://|www\.[\S]+\.[\S]""")
|
||||||
|
|
||||||
# Taken from:
|
# Taken from:
|
||||||
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
|
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
|
||||||
@@ -55,7 +51,7 @@ BAD_SENDER_NAMES = [
|
|||||||
|
|
||||||
|
|
||||||
def binary_regex_search(prog):
|
def binary_regex_search(prog):
|
||||||
'''Returns a function that returns 1 or 0 depending on regex search result.
|
"""Returns a function that returns 1 or 0 depending on regex search result.
|
||||||
|
|
||||||
If regular expression compiled into prog is present in a string
|
If regular expression compiled into prog is present in a string
|
||||||
the result of calling the returned function with the string will be 1
|
the result of calling the returned function with the string will be 1
|
||||||
@@ -66,12 +62,12 @@ def binary_regex_search(prog):
|
|||||||
1
|
1
|
||||||
>>> binary_regex_search(re.compile("12"))("34")
|
>>> binary_regex_search(re.compile("12"))("34")
|
||||||
0
|
0
|
||||||
'''
|
"""
|
||||||
return lambda s: 1 if prog.search(s) else 0
|
return lambda s: 1 if prog.search(s) else 0
|
||||||
|
|
||||||
|
|
||||||
def binary_regex_match(prog):
|
def binary_regex_match(prog):
|
||||||
'''Returns a function that returns 1 or 0 depending on regex match result.
|
"""Returns a function that returns 1 or 0 depending on regex match result.
|
||||||
|
|
||||||
If a string matches regular expression compiled into prog
|
If a string matches regular expression compiled into prog
|
||||||
the result of calling the returned function with the string will be 1
|
the result of calling the returned function with the string will be 1
|
||||||
@@ -82,7 +78,7 @@ def binary_regex_match(prog):
|
|||||||
1
|
1
|
||||||
>>> binary_regex_match(re.compile("12"))("3 12")
|
>>> binary_regex_match(re.compile("12"))("3 12")
|
||||||
0
|
0
|
||||||
'''
|
"""
|
||||||
return lambda s: 1 if prog.match(s) else 0
|
return lambda s: 1 if prog.match(s) else 0
|
||||||
|
|
||||||
|
|
||||||
@@ -102,7 +98,7 @@ def flatten_list(list_to_flatten):
|
|||||||
|
|
||||||
|
|
||||||
def contains_sender_names(sender):
|
def contains_sender_names(sender):
|
||||||
'''Returns a functions to search sender\'s name or it\'s part.
|
"""Returns a functions to search sender\'s name or it\'s part.
|
||||||
|
|
||||||
>>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
|
>>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
|
||||||
>>> feature("Sergey Obukhov")
|
>>> feature("Sergey Obukhov")
|
||||||
@@ -115,7 +111,7 @@ def contains_sender_names(sender):
|
|||||||
1
|
1
|
||||||
>>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
|
>>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
|
||||||
1
|
1
|
||||||
'''
|
"""
|
||||||
names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
|
names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
|
||||||
for e in extract_names(sender)]))
|
for e in extract_names(sender)]))
|
||||||
names = names or sender
|
names = names or sender
|
||||||
@@ -135,20 +131,25 @@ def extract_names(sender):
|
|||||||
>>> extract_names('')
|
>>> extract_names('')
|
||||||
[]
|
[]
|
||||||
"""
|
"""
|
||||||
sender = to_unicode(sender, precise=True)
|
|
||||||
# Remove non-alphabetical characters
|
# Remove non-alphabetical characters
|
||||||
sender = "".join([char if char.isalpha() else ' ' for char in sender])
|
sender = "".join([char if char.isalpha() else ' ' for char in sender])
|
||||||
# Remove too short words and words from "black" list i.e.
|
# Remove too short words and words from "black" list i.e.
|
||||||
# words like `ru`, `gmail`, `com`, `org`, etc.
|
# words like `ru`, `gmail`, `com`, `org`, etc.
|
||||||
sender = [word for word in sender.split() if len(word) > 1 and
|
names = list()
|
||||||
not word in BAD_SENDER_NAMES]
|
for word in sender.split():
|
||||||
# Remove duplicates
|
if len(word) < 2:
|
||||||
names = list(set(sender))
|
continue
|
||||||
|
if word in BAD_SENDER_NAMES:
|
||||||
|
continue
|
||||||
|
if word in names:
|
||||||
|
continue
|
||||||
|
names.append(word)
|
||||||
|
|
||||||
return names
|
return names
|
||||||
|
|
||||||
|
|
||||||
def categories_percent(s, categories):
|
def categories_percent(s, categories):
|
||||||
'''Returns category characters percent.
|
"""Returns category characters percent.
|
||||||
|
|
||||||
>>> categories_percent("qqq ggg hhh", ["Po"])
|
>>> categories_percent("qqq ggg hhh", ["Po"])
|
||||||
0.0
|
0.0
|
||||||
@@ -160,9 +161,8 @@ def categories_percent(s, categories):
|
|||||||
50.0
|
50.0
|
||||||
>>> categories_percent("s.s,5s", ["Po", "Nd"])
|
>>> categories_percent("s.s,5s", ["Po", "Nd"])
|
||||||
50.0
|
50.0
|
||||||
'''
|
"""
|
||||||
count = 0
|
count = 0
|
||||||
s = to_unicode(s, precise=True)
|
|
||||||
for c in s:
|
for c in s:
|
||||||
if unicodedata.category(c) in categories:
|
if unicodedata.category(c) in categories:
|
||||||
count += 1
|
count += 1
|
||||||
@@ -170,19 +170,18 @@ def categories_percent(s, categories):
|
|||||||
|
|
||||||
|
|
||||||
def punctuation_percent(s):
|
def punctuation_percent(s):
|
||||||
'''Returns punctuation percent.
|
"""Returns punctuation percent.
|
||||||
|
|
||||||
>>> punctuation_percent("qqq ggg hhh")
|
>>> punctuation_percent("qqq ggg hhh")
|
||||||
0.0
|
0.0
|
||||||
>>> punctuation_percent("q,w.")
|
>>> punctuation_percent("q,w.")
|
||||||
50.0
|
50.0
|
||||||
'''
|
"""
|
||||||
return categories_percent(s, ['Po'])
|
return categories_percent(s, ['Po'])
|
||||||
|
|
||||||
|
|
||||||
def capitalized_words_percent(s):
|
def capitalized_words_percent(s):
|
||||||
'''Returns capitalized words percent.'''
|
"""Returns capitalized words percent."""
|
||||||
s = to_unicode(s, precise=True)
|
|
||||||
words = re.split('\s', s)
|
words = re.split('\s', s)
|
||||||
words = [w for w in words if w.strip()]
|
words = [w for w in words if w.strip()]
|
||||||
words = [w for w in words if len(w) > 2]
|
words = [w for w in words if len(w) > 2]
|
||||||
@@ -208,20 +207,26 @@ def many_capitalized_words(s):
|
|||||||
|
|
||||||
|
|
||||||
def has_signature(body, sender):
|
def has_signature(body, sender):
|
||||||
'''Checks if the body has signature. Returns True or False.'''
|
"""Checks if the body has signature. Returns True or False."""
|
||||||
non_empty = [line for line in body.splitlines() if line.strip()]
|
non_empty = [line for line in body.splitlines() if line.strip()]
|
||||||
candidate = non_empty[-SIGNATURE_MAX_LINES:]
|
candidate = non_empty[-SIGNATURE_MAX_LINES:]
|
||||||
upvotes = 0
|
upvotes = 0
|
||||||
|
sender_check = contains_sender_names(sender)
|
||||||
for line in candidate:
|
for line in candidate:
|
||||||
# we check lines for sender's name, phone, email and url,
|
# we check lines for sender's name, phone, email and url,
|
||||||
# those signature lines don't take more then 27 lines
|
# those signature lines don't take more then 27 lines
|
||||||
if len(line.strip()) > 27:
|
if len(line.strip()) > 27:
|
||||||
continue
|
continue
|
||||||
elif contains_sender_names(sender)(line):
|
|
||||||
|
if sender_check(line):
|
||||||
return True
|
return True
|
||||||
elif (binary_regex_search(RE_RELAX_PHONE)(line) +
|
|
||||||
|
if (binary_regex_search(RE_RELAX_PHONE)(line) +
|
||||||
binary_regex_search(RE_EMAIL)(line) +
|
binary_regex_search(RE_EMAIL)(line) +
|
||||||
binary_regex_search(RE_URL)(line) == 1):
|
binary_regex_search(RE_URL)(line) == 1):
|
||||||
upvotes += 1
|
upvotes += 1
|
||||||
|
|
||||||
if upvotes > 1:
|
if upvotes > 1:
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|||||||
179
talon/utils.py
179
talon/utils.py
@@ -1,109 +1,17 @@
|
|||||||
# coding:utf-8
|
# coding:utf-8
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
from __future__ import absolute_import
|
import html5lib
|
||||||
import logging
|
|
||||||
from random import shuffle
|
|
||||||
import chardet
|
|
||||||
import cchardet
|
|
||||||
import regex as re
|
import regex as re
|
||||||
|
from html5lib import HTMLParser
|
||||||
from lxml import html
|
|
||||||
from lxml.cssselect import CSSSelector
|
from lxml.cssselect import CSSSelector
|
||||||
|
from lxml.etree import _Element
|
||||||
|
from lxml.html import html5parser
|
||||||
|
|
||||||
from talon.constants import RE_DELIMITER
|
from talon.constants import RE_DELIMITER
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
def safe_format(format_string, *args, **kwargs):
|
def get_delimiter(msg_body: str) -> str:
|
||||||
"""
|
|
||||||
Helper: formats string with any combination of bytestrings/unicode
|
|
||||||
strings without raising exceptions
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
if not args and not kwargs:
|
|
||||||
return format_string
|
|
||||||
else:
|
|
||||||
return format_string.format(*args, **kwargs)
|
|
||||||
|
|
||||||
# catch encoding errors and transform everything into utf-8 string
|
|
||||||
# before logging:
|
|
||||||
except (UnicodeEncodeError, UnicodeDecodeError):
|
|
||||||
format_string = to_utf8(format_string)
|
|
||||||
args = [to_utf8(p) for p in args]
|
|
||||||
kwargs = {k: to_utf8(v) for k, v in six.iteritems(kwargs)}
|
|
||||||
return format_string.format(*args, **kwargs)
|
|
||||||
|
|
||||||
# ignore other errors
|
|
||||||
except:
|
|
||||||
return u''
|
|
||||||
|
|
||||||
|
|
||||||
def to_unicode(str_or_unicode, precise=False):
|
|
||||||
"""
|
|
||||||
Safely returns a unicode version of a given string
|
|
||||||
>>> utils.to_unicode('привет')
|
|
||||||
u'привет'
|
|
||||||
>>> utils.to_unicode(u'привет')
|
|
||||||
u'привет'
|
|
||||||
If `precise` flag is True, tries to guess the correct encoding first.
|
|
||||||
"""
|
|
||||||
if not isinstance(str_or_unicode, six.text_type):
|
|
||||||
encoding = quick_detect_encoding(str_or_unicode) if precise else 'utf-8'
|
|
||||||
return six.text_type(str_or_unicode, encoding, 'replace')
|
|
||||||
return str_or_unicode
|
|
||||||
|
|
||||||
|
|
||||||
def detect_encoding(string):
|
|
||||||
"""
|
|
||||||
Tries to detect the encoding of the passed string.
|
|
||||||
|
|
||||||
Defaults to UTF-8.
|
|
||||||
"""
|
|
||||||
assert isinstance(string, bytes)
|
|
||||||
try:
|
|
||||||
detected = chardet.detect(string)
|
|
||||||
if detected:
|
|
||||||
return detected.get('encoding') or 'utf-8'
|
|
||||||
except Exception as e:
|
|
||||||
pass
|
|
||||||
return 'utf-8'
|
|
||||||
|
|
||||||
|
|
||||||
def quick_detect_encoding(string):
|
|
||||||
"""
|
|
||||||
Tries to detect the encoding of the passed string.
|
|
||||||
|
|
||||||
Uses cchardet. Fallbacks to detect_encoding.
|
|
||||||
"""
|
|
||||||
assert isinstance(string, bytes)
|
|
||||||
try:
|
|
||||||
detected = cchardet.detect(string)
|
|
||||||
if detected:
|
|
||||||
return detected.get('encoding') or detect_encoding(string)
|
|
||||||
except Exception as e:
|
|
||||||
pass
|
|
||||||
return detect_encoding(string)
|
|
||||||
|
|
||||||
|
|
||||||
def to_utf8(str_or_unicode):
|
|
||||||
"""
|
|
||||||
Safely returns a UTF-8 version of a given string
|
|
||||||
>>> utils.to_utf8(u'hi')
|
|
||||||
'hi'
|
|
||||||
"""
|
|
||||||
if not isinstance(str_or_unicode, six.text_type):
|
|
||||||
return str_or_unicode.encode("utf-8", "ignore")
|
|
||||||
return str(str_or_unicode)
|
|
||||||
|
|
||||||
|
|
||||||
def random_token(length=7):
|
|
||||||
vals = ("a b c d e f g h i j k l m n o p q r s t u v w x y z "
|
|
||||||
"0 1 2 3 4 5 6 7 8 9").split(' ')
|
|
||||||
shuffle(vals)
|
|
||||||
return ''.join(vals[:length])
|
|
||||||
|
|
||||||
|
|
||||||
def get_delimiter(msg_body):
|
|
||||||
delimiter = RE_DELIMITER.search(msg_body)
|
delimiter = RE_DELIMITER.search(msg_body)
|
||||||
if delimiter:
|
if delimiter:
|
||||||
delimiter = delimiter.group()
|
delimiter = delimiter.group()
|
||||||
@@ -112,7 +20,8 @@ def get_delimiter(msg_body):
|
|||||||
|
|
||||||
return delimiter
|
return delimiter
|
||||||
|
|
||||||
def html_tree_to_text(tree):
|
|
||||||
|
def html_tree_to_text(tree: _Element) -> str:
|
||||||
for style in CSSSelector('style')(tree):
|
for style in CSSSelector('style')(tree):
|
||||||
style.getparent().remove(style)
|
style.getparent().remove(style)
|
||||||
|
|
||||||
@@ -120,7 +29,7 @@ def html_tree_to_text(tree):
|
|||||||
parent = c.getparent()
|
parent = c.getparent()
|
||||||
|
|
||||||
# comment with no parent does not impact produced text
|
# comment with no parent does not impact produced text
|
||||||
if not parent:
|
if parent is None:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
parent.remove(c)
|
parent.remove(c)
|
||||||
@@ -129,7 +38,7 @@ def html_tree_to_text(tree):
|
|||||||
for el in tree.iter():
|
for el in tree.iter():
|
||||||
el_text = (el.text or '') + (el.tail or '')
|
el_text = (el.text or '') + (el.tail or '')
|
||||||
if len(el_text) > 1:
|
if len(el_text) > 1:
|
||||||
if el.tag in _BLOCKTAGS:
|
if el.tag in _BLOCKTAGS + _HARDBREAKS:
|
||||||
text += "\n"
|
text += "\n"
|
||||||
if el.tag == 'li':
|
if el.tag == 'li':
|
||||||
text += " * "
|
text += " * "
|
||||||
@@ -140,63 +49,87 @@ def html_tree_to_text(tree):
|
|||||||
if href:
|
if href:
|
||||||
text += "(%s) " % href
|
text += "(%s) " % href
|
||||||
|
|
||||||
if el.tag in _HARDBREAKS and text and not text.endswith("\n"):
|
if (el.tag in _HARDBREAKS and text and
|
||||||
|
not text.endswith("\n") and not el_text):
|
||||||
text += "\n"
|
text += "\n"
|
||||||
|
|
||||||
retval = _rm_excessive_newlines(text)
|
text = _rm_excessive_newlines(text)
|
||||||
return _encode_utf8(retval)
|
return text
|
||||||
|
|
||||||
|
|
||||||
def html_to_text(s: str) -> str | None:
    """
    Dead-simple HTML-to-text converter:
    >>> html_to_text("one<br>two<br>three")
    <<< "one\ntwo\nthree"

    NOTES:
    1. the string is expected to contain UTF-8 encoded HTML!
    2. if html can't be parsed returns None
    """
    prepared = _prepend_utf8_declaration(s)
    prepared = prepared.replace("\n", "")

    tree = html_fromstring(prepared)
    return None if tree is None else html_tree_to_text(tree)
|
||||||
|
|
||||||
|
|
||||||
def _contains_charset_spec(s):
|
def html_fromstring(s: str) -> _Element:
    """Parse an html fragment/tree from a string with the html5lib parser."""
    parser = _html5lib_parser()
    return html5parser.fromstring(s, parser=parser)
|
||||||
|
|
||||||
|
|
||||||
|
def html_document_fromstring(s: str) -> _Element:
    """Parse a full html document from a string with the html5lib parser."""
    parser = _html5lib_parser()
    return html5parser.document_fromstring(s, parser=parser)
|
||||||
|
|
||||||
|
|
||||||
|
def cssselect(expr: str, tree: _Element) -> list[_Element]:
    """Apply the CSS selector `expr` to an lxml `tree`, returning matches.

    Note: the previous annotation declared `tree: str`, but a CSSSelector
    is called with an lxml element tree, not a string.
    """
    return CSSSelector(expr)(tree)
|
||||||
|
|
||||||
|
|
||||||
|
def _contains_charset_spec(s: str) -> str:
|
||||||
"""Return True if the first 4KB contain charset spec
|
"""Return True if the first 4KB contain charset spec
|
||||||
"""
|
"""
|
||||||
return s.lower().find(b'html; charset=', 0, 4096) != -1
|
return s.lower().find('html; charset=', 0, 4096) != -1
|
||||||
|
|
||||||
|
|
||||||
def _prepend_utf8_declaration(s: str) -> str:
    """Prepend a utf-8 charset declaration unless the first 4KB already
    contain one.
    """
    if _contains_charset_spec(s):
        return s
    return _UTF8_DECLARATION + s
|
||||||
|
|
||||||
|
|
||||||
def _rm_excessive_newlines(s: str) -> str:
    """Collapse runs of newlines (common with div-heavy html) to doubles
    and strip surrounding whitespace.
    """
    collapsed = _RE_EXCESSIVE_NEWLINES.sub("\n\n", s)
    return collapsed.strip()
|
||||||
|
|
||||||
|
|
||||||
def _encode_utf8(s):
|
def _html5lib_parser() -> HTMLParser:
|
||||||
"""Encode in 'utf-8' if unicode
|
|
||||||
"""
|
"""
|
||||||
return s.encode('utf-8') if isinstance(s, six.text_type) else s
|
html5lib is a pure-python library that conforms to the WHATWG HTML spec
|
||||||
|
and is not vulnarable to certain attacks common for XML libraries
|
||||||
|
"""
|
||||||
|
return HTMLParser(
|
||||||
|
# build lxml tree
|
||||||
|
html5lib.treebuilders.getTreeBuilder("lxml"),
|
||||||
|
# remove namespace value from inside lxml.html.html5paser element tag
|
||||||
|
# otherwise it yields something like "{http://www.w3.org/1999/xhtml}div"
|
||||||
|
# instead of "div", throwing the algo off
|
||||||
|
namespaceHTMLElements=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
_UTF8_DECLARATION = (b'<meta http-equiv="Content-Type" content="text/html;'
|
_UTF8_DECLARATION = ('<meta http-equiv="Content-Type" content="text/html;'
|
||||||
b'charset=utf-8">')
|
'charset=utf-8">')
|
||||||
|
|
||||||
|
|
||||||
_BLOCKTAGS = ['div', 'p', 'ul', 'li', 'h1', 'h2', 'h3']
|
_BLOCKTAGS = ['div', 'p', 'ul', 'li', 'h1', 'h2', 'h3']
|
||||||
_HARDBREAKS = ['br', 'hr', 'tr']
|
_HARDBREAKS = ['br', 'hr', 'tr']
|
||||||
|
|
||||||
|
|
||||||
_RE_EXCESSIVE_NEWLINES = re.compile("\n{2,10}")
|
_RE_EXCESSIVE_NEWLINES = re.compile("\n{2,10}")
|
||||||
|
|||||||
3
test-requirements.txt
Normal file
3
test-requirements.txt
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
coverage
|
||||||
|
mock
|
||||||
|
nose>=1.2.1
|
||||||
@@ -1,16 +1,20 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
from . import *
|
|
||||||
from . fixtures import *
|
|
||||||
|
|
||||||
import regex as re
|
# noinspection PyUnresolvedReferences
|
||||||
|
import re
|
||||||
|
from unittest.mock import Mock, patch
|
||||||
|
|
||||||
|
from nose.tools import assert_false, assert_true, eq_, ok_
|
||||||
|
|
||||||
|
from tests.fixtures import (OLK_SRC_BODY_SECTION,
|
||||||
|
REPLY_QUOTATIONS_SHARE_BLOCK,
|
||||||
|
REPLY_SEPARATED_BY_HR)
|
||||||
from talon import quotations, utils as u
|
from talon import quotations, utils as u
|
||||||
|
|
||||||
|
RE_WHITESPACE = re.compile(r"\s")
|
||||||
RE_WHITESPACE = re.compile("\s")
|
RE_DOUBLE_WHITESPACE = re.compile(r"\s")
|
||||||
RE_DOUBLE_WHITESPACE = re.compile("\s")
|
|
||||||
|
|
||||||
|
|
||||||
def test_quotation_splitter_inside_blockquote():
|
def test_quotation_splitter_inside_blockquote():
|
||||||
@@ -27,7 +31,7 @@ def test_quotation_splitter_inside_blockquote():
|
|||||||
|
|
||||||
</blockquote>"""
|
</blockquote>"""
|
||||||
|
|
||||||
eq_("<html><body><p>Reply</p></body></html>",
|
eq_("<html><head></head><body>Reply</body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -44,7 +48,7 @@ def test_quotation_splitter_outside_blockquote():
|
|||||||
</div>
|
</div>
|
||||||
</blockquote>
|
</blockquote>
|
||||||
"""
|
"""
|
||||||
eq_("<html><body><p>Reply</p></body></html>",
|
eq_("<html><head></head><body>Reply</body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -62,7 +66,7 @@ def test_regular_blockquote():
|
|||||||
</div>
|
</div>
|
||||||
</blockquote>
|
</blockquote>
|
||||||
"""
|
"""
|
||||||
eq_("<html><body><p>Reply</p><blockquote>Regular</blockquote></body></html>",
|
eq_("<html><head></head><body>Reply<blockquote>Regular</blockquote></body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -85,6 +89,7 @@ Reply
|
|||||||
|
|
||||||
reply = """
|
reply = """
|
||||||
<html>
|
<html>
|
||||||
|
<head></head>
|
||||||
<body>
|
<body>
|
||||||
Reply
|
Reply
|
||||||
|
|
||||||
@@ -128,7 +133,7 @@ def test_gmail_quote():
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>"""
|
</div>"""
|
||||||
eq_("<html><body><p>Reply</p></body></html>",
|
eq_("<html><head></head><body>Reply</body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -139,7 +144,7 @@ def test_gmail_quote_compact():
|
|||||||
'<div>Test</div>' \
|
'<div>Test</div>' \
|
||||||
'</div>' \
|
'</div>' \
|
||||||
'</div>'
|
'</div>'
|
||||||
eq_("<html><body><p>Reply</p></body></html>",
|
eq_("<html><head></head><body>Reply</body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -164,9 +169,9 @@ def test_unicode_in_reply():
|
|||||||
|
|
||||||
<blockquote>
|
<blockquote>
|
||||||
Quote
|
Quote
|
||||||
</blockquote>""".encode("utf-8")
|
</blockquote>"""
|
||||||
|
|
||||||
eq_("<html><body><p>Reply  Text<br></p><div><br></div>"
|
eq_("<html><head></head><body>Reply  Text<br><div><br></div>"
|
||||||
"</body></html>",
|
"</body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
@@ -192,6 +197,7 @@ def test_blockquote_disclaimer():
|
|||||||
|
|
||||||
stripped_html = """
|
stripped_html = """
|
||||||
<html>
|
<html>
|
||||||
|
<head></head>
|
||||||
<body>
|
<body>
|
||||||
<div>
|
<div>
|
||||||
<div>
|
<div>
|
||||||
@@ -223,7 +229,7 @@ def test_date_block():
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
"""
|
"""
|
||||||
eq_('<html><body><div>message<br></div></body></html>',
|
eq_('<html><head></head><body><div>message<br></div></body></html>',
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -240,7 +246,7 @@ Subject: You Have New Mail From Mary!<br><br>
|
|||||||
text
|
text
|
||||||
</div></div>
|
</div></div>
|
||||||
"""
|
"""
|
||||||
eq_('<html><body><div>message<br></div></body></html>',
|
eq_('<html><head></head><body><div>message<br></div></body></html>',
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -258,7 +264,7 @@ def test_reply_shares_div_with_from_block():
|
|||||||
|
|
||||||
</div>
|
</div>
|
||||||
</body>'''
|
</body>'''
|
||||||
eq_('<html><body><div>Blah<br><br></div></body></html>',
|
eq_('<html><head></head><body><div>Blah<br><br></div></body></html>',
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -269,13 +275,13 @@ def test_reply_quotations_share_block():
|
|||||||
|
|
||||||
|
|
||||||
def test_OLK_SRC_BODY_SECTION_stripped():
|
def test_OLK_SRC_BODY_SECTION_stripped():
|
||||||
eq_('<html><body><div>Reply</div></body></html>',
|
eq_('<html><head></head><body><div>Reply</div></body></html>',
|
||||||
RE_WHITESPACE.sub(
|
RE_WHITESPACE.sub(
|
||||||
'', quotations.extract_from_html(OLK_SRC_BODY_SECTION)))
|
'', quotations.extract_from_html(OLK_SRC_BODY_SECTION)))
|
||||||
|
|
||||||
|
|
||||||
def test_reply_separated_by_hr():
|
def test_reply_separated_by_hr():
|
||||||
eq_('<html><body><div>Hi<div>there</div></div></body></html>',
|
eq_('<html><head></head><body><div>Hi<div>there</div></div></body></html>',
|
||||||
RE_WHITESPACE.sub(
|
RE_WHITESPACE.sub(
|
||||||
'', quotations.extract_from_html(REPLY_SEPARATED_BY_HR)))
|
'', quotations.extract_from_html(REPLY_SEPARATED_BY_HR)))
|
||||||
|
|
||||||
@@ -296,17 +302,21 @@ Reply
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
'''
|
'''
|
||||||
eq_('<html><body><p>Reply</p><div><hr></div></body></html>',
|
eq_('<html><head></head><body>Reply<div><hr></div></body></html>',
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
def extract_reply_and_check(filename):
|
def extract_reply_and_check(filename):
|
||||||
f = open(filename)
|
import sys
|
||||||
|
kwargs = {}
|
||||||
|
if sys.version_info > (3, 0):
|
||||||
|
kwargs["encoding"] = "utf8"
|
||||||
|
|
||||||
|
f = open(filename, **kwargs)
|
||||||
|
|
||||||
msg_body = f.read()
|
msg_body = f.read()
|
||||||
reply = quotations.extract_from_html(msg_body)
|
reply = quotations.extract_from_html(msg_body)
|
||||||
plain_reply = u.html_to_text(reply)
|
plain_reply = u.html_to_text(reply)
|
||||||
plain_reply = plain_reply.decode('utf8')
|
|
||||||
|
|
||||||
eq_(RE_WHITESPACE.sub('', "Hi. I am fine.\n\nThanks,\nAlex"),
|
eq_(RE_WHITESPACE.sub('', "Hi. I am fine.\n\nThanks,\nAlex"),
|
||||||
RE_WHITESPACE.sub('', plain_reply))
|
RE_WHITESPACE.sub('', plain_reply))
|
||||||
@@ -373,7 +383,7 @@ reply
|
|||||||
extracted = quotations.extract_from_html(msg_body)
|
extracted = quotations.extract_from_html(msg_body)
|
||||||
assert_false(symbol in extracted)
|
assert_false(symbol in extracted)
|
||||||
# Keep new lines otherwise "My reply" becomes one word - "Myreply"
|
# Keep new lines otherwise "My reply" becomes one word - "Myreply"
|
||||||
eq_("<html><body><p>My\nreply\n</p></body></html>", extracted)
|
eq_("<html><head></head><body>My\nreply\n</body></html>", extracted)
|
||||||
|
|
||||||
|
|
||||||
def test_gmail_forwarded_msg():
|
def test_gmail_forwarded_msg():
|
||||||
@@ -383,18 +393,6 @@ def test_gmail_forwarded_msg():
|
|||||||
eq_(RE_WHITESPACE.sub('', msg_body), RE_WHITESPACE.sub('', extracted))
|
eq_(RE_WHITESPACE.sub('', msg_body), RE_WHITESPACE.sub('', extracted))
|
||||||
|
|
||||||
|
|
||||||
@patch.object(quotations, 'MAX_HTML_LEN', 1)
|
|
||||||
def test_too_large_html():
|
|
||||||
msg_body = 'Reply' \
|
|
||||||
'<div class="gmail_quote">' \
|
|
||||||
'<div class="gmail_quote">On 11-Apr-2011, at 6:54 PM, Bob <bob@example.com> wrote:' \
|
|
||||||
'<div>Test</div>' \
|
|
||||||
'</div>' \
|
|
||||||
'</div>'
|
|
||||||
eq_(RE_WHITESPACE.sub('', msg_body),
|
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
|
||||||
|
|
||||||
|
|
||||||
def test_readable_html_empty():
|
def test_readable_html_empty():
|
||||||
msg_body = """
|
msg_body = """
|
||||||
<blockquote>
|
<blockquote>
|
||||||
@@ -411,3 +409,29 @@ def test_readable_html_empty():
|
|||||||
|
|
||||||
eq_(RE_WHITESPACE.sub('', msg_body),
|
eq_(RE_WHITESPACE.sub('', msg_body),
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(quotations, 'html_document_fromstring', Mock(return_value=None))
|
||||||
|
def test_bad_html():
|
||||||
|
bad_html = "<html></html>"
|
||||||
|
eq_(bad_html, quotations.extract_from_html(bad_html))
|
||||||
|
|
||||||
|
|
||||||
|
def test_remove_namespaces():
|
||||||
|
msg_body = """
|
||||||
|
<html xmlns:o="urn:schemas-microsoft-com:office:office" xmlns="http://www.w3.org/TR/REC-html40">
|
||||||
|
<body>
|
||||||
|
<o:p>Dear Sir,</o:p>
|
||||||
|
<o:p>Thank you for the email.</o:p>
|
||||||
|
<blockquote>thing</blockquote>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
"""
|
||||||
|
|
||||||
|
rendered = quotations.extract_from_html(msg_body)
|
||||||
|
|
||||||
|
assert_true("<p>" in rendered)
|
||||||
|
assert_true("xmlns" in rendered)
|
||||||
|
|
||||||
|
assert_true("<o:p>" not in rendered)
|
||||||
|
assert_true("<xmlns:o>" not in rendered)
|
||||||
|
|||||||
@@ -1,16 +1,16 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
from .. import *
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from talon.signature.learning import dataset
|
|
||||||
from talon import signature
|
|
||||||
from talon.signature import extraction as e
|
|
||||||
from talon.signature import bruteforce
|
|
||||||
from six.moves import range
|
from six.moves import range
|
||||||
|
|
||||||
|
from talon.signature import bruteforce, extraction, extract
|
||||||
|
from talon.signature import extraction as e
|
||||||
|
from talon.signature.learning import dataset
|
||||||
|
from .. import *
|
||||||
|
|
||||||
|
|
||||||
def test_message_shorter_SIGNATURE_MAX_LINES():
|
def test_message_shorter_SIGNATURE_MAX_LINES():
|
||||||
sender = "bob@foo.bar"
|
sender = "bob@foo.bar"
|
||||||
@@ -18,23 +18,28 @@ def test_message_shorter_SIGNATURE_MAX_LINES():
|
|||||||
|
|
||||||
Thanks in advance,
|
Thanks in advance,
|
||||||
Bob"""
|
Bob"""
|
||||||
text, extracted_signature = signature.extract(body, sender)
|
text, extracted_signature = extract(body, sender)
|
||||||
eq_('\n'.join(body.splitlines()[:2]), text)
|
eq_('\n'.join(body.splitlines()[:2]), text)
|
||||||
eq_('\n'.join(body.splitlines()[-2:]), extracted_signature)
|
eq_('\n'.join(body.splitlines()[-2:]), extracted_signature)
|
||||||
|
|
||||||
|
|
||||||
def test_messages_longer_SIGNATURE_MAX_LINES():
|
def test_messages_longer_SIGNATURE_MAX_LINES():
|
||||||
|
import sys
|
||||||
|
kwargs = {}
|
||||||
|
if sys.version_info > (3, 0):
|
||||||
|
kwargs["encoding"] = "utf8"
|
||||||
|
|
||||||
for filename in os.listdir(STRIPPED):
|
for filename in os.listdir(STRIPPED):
|
||||||
filename = os.path.join(STRIPPED, filename)
|
filename = os.path.join(STRIPPED, filename)
|
||||||
if not filename.endswith('_body'):
|
if not filename.endswith('_body'):
|
||||||
continue
|
continue
|
||||||
sender, body = dataset.parse_msg_sender(filename)
|
sender, body = dataset.parse_msg_sender(filename)
|
||||||
text, extracted_signature = signature.extract(body, sender)
|
text, extracted_signature = extract(body, sender)
|
||||||
extracted_signature = extracted_signature or ''
|
extracted_signature = extracted_signature or ''
|
||||||
with open(filename[:-len('body')] + 'signature') as ms:
|
with open(filename[:-len('body')] + 'signature', **kwargs) as ms:
|
||||||
msg_signature = ms.read()
|
msg_signature = ms.read()
|
||||||
eq_(msg_signature.strip(), extracted_signature.strip())
|
eq_(msg_signature.strip(), extracted_signature.strip())
|
||||||
stripped_msg = body.strip()[:len(body.strip())-len(msg_signature)]
|
stripped_msg = body.strip()[:len(body.strip()) - len(msg_signature)]
|
||||||
eq_(stripped_msg.strip(), text.strip())
|
eq_(stripped_msg.strip(), text.strip())
|
||||||
|
|
||||||
|
|
||||||
@@ -47,7 +52,7 @@ Thanks in advance,
|
|||||||
some text which doesn't seem to be a signature at all
|
some text which doesn't seem to be a signature at all
|
||||||
Bob"""
|
Bob"""
|
||||||
|
|
||||||
text, extracted_signature = signature.extract(body, sender)
|
text, extracted_signature = extract(body, sender)
|
||||||
eq_('\n'.join(body.splitlines()[:2]), text)
|
eq_('\n'.join(body.splitlines()[:2]), text)
|
||||||
eq_('\n'.join(body.splitlines()[-3:]), extracted_signature)
|
eq_('\n'.join(body.splitlines()[-3:]), extracted_signature)
|
||||||
|
|
||||||
@@ -60,7 +65,7 @@ Thanks in advance,
|
|||||||
some long text here which doesn't seem to be a signature at all
|
some long text here which doesn't seem to be a signature at all
|
||||||
Bob"""
|
Bob"""
|
||||||
|
|
||||||
text, extracted_signature = signature.extract(body, sender)
|
text, extracted_signature = extract(body, sender)
|
||||||
eq_('\n'.join(body.splitlines()[:-1]), text)
|
eq_('\n'.join(body.splitlines()[:-1]), text)
|
||||||
eq_('Bob', extracted_signature)
|
eq_('Bob', extracted_signature)
|
||||||
|
|
||||||
@@ -68,13 +73,13 @@ Bob"""
|
|||||||
|
|
||||||
some *long* text here which doesn't seem to be a signature at all
|
some *long* text here which doesn't seem to be a signature at all
|
||||||
"""
|
"""
|
||||||
((body, None), signature.extract(body, "david@example.com"))
|
((body, None), extract(body, "david@example.com"))
|
||||||
|
|
||||||
|
|
||||||
def test_basic():
|
def test_basic():
|
||||||
msg_body = 'Blah\r\n--\r\n\r\nSergey Obukhov'
|
msg_body = 'Blah\r\n--\r\n\r\nSergey Obukhov'
|
||||||
eq_(('Blah', '--\r\n\r\nSergey Obukhov'),
|
eq_(('Blah', '--\r\n\r\nSergey Obukhov'),
|
||||||
signature.extract(msg_body, 'Sergey'))
|
extract(msg_body, 'Sergey'))
|
||||||
|
|
||||||
|
|
||||||
def test_capitalized():
|
def test_capitalized():
|
||||||
@@ -99,7 +104,7 @@ Doe Inc
|
|||||||
Doe Inc
|
Doe Inc
|
||||||
555-531-7967"""
|
555-531-7967"""
|
||||||
|
|
||||||
eq_(sig, signature.extract(msg_body, 'Doe')[1])
|
eq_(sig, extract(msg_body, 'Doe')[1])
|
||||||
|
|
||||||
|
|
||||||
def test_over_2_text_lines_after_signature():
|
def test_over_2_text_lines_after_signature():
|
||||||
@@ -110,25 +115,25 @@ def test_over_2_text_lines_after_signature():
|
|||||||
2 non signature lines in the end
|
2 non signature lines in the end
|
||||||
It's not signature
|
It's not signature
|
||||||
"""
|
"""
|
||||||
text, extracted_signature = signature.extract(body, "Bob")
|
text, extracted_signature = extract(body, "Bob")
|
||||||
eq_(extracted_signature, None)
|
eq_(extracted_signature, None)
|
||||||
|
|
||||||
|
|
||||||
def test_no_signature():
|
def test_no_signature():
|
||||||
sender, body = "bob@foo.bar", "Hello"
|
sender, body = "bob@foo.bar", "Hello"
|
||||||
eq_((body, None), signature.extract(body, sender))
|
eq_((body, None), extract(body, sender))
|
||||||
|
|
||||||
|
|
||||||
def test_handles_unicode():
|
def test_handles_unicode():
|
||||||
sender, body = dataset.parse_msg_sender(UNICODE_MSG)
|
sender, body = dataset.parse_msg_sender(UNICODE_MSG)
|
||||||
text, extracted_signature = signature.extract(body, sender)
|
text, extracted_signature = extract(body, sender)
|
||||||
|
|
||||||
|
|
||||||
@patch.object(signature.extraction, 'has_signature')
|
@patch.object(extraction, 'has_signature')
|
||||||
def test_signature_extract_crash(has_signature):
|
def test_signature_extract_crash(has_signature):
|
||||||
has_signature.side_effect = Exception('Bam!')
|
has_signature.side_effect = Exception('Bam!')
|
||||||
msg_body = u'Blah\r\n--\r\n\r\nСергей'
|
msg_body = u'Blah\r\n--\r\n\r\nСергей'
|
||||||
eq_((msg_body, None), signature.extract(msg_body, 'Сергей'))
|
eq_((msg_body, None), extract(msg_body, 'Сергей'))
|
||||||
|
|
||||||
|
|
||||||
def test_mark_lines():
|
def test_mark_lines():
|
||||||
|
|||||||
@@ -35,6 +35,19 @@ On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> wrote:
|
|||||||
|
|
||||||
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
def test_pattern_on_date_polymail():
|
||||||
|
msg_body = """Test reply
|
||||||
|
|
||||||
|
On Tue, Apr 11, 2017 at 10:07 PM John Smith
|
||||||
|
|
||||||
|
<
|
||||||
|
mailto:John Smith <johnsmith@gmail.com>
|
||||||
|
> wrote:
|
||||||
|
Test quoted data
|
||||||
|
"""
|
||||||
|
|
||||||
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
def test_pattern_sent_from_samsung_smb_wrote():
|
def test_pattern_sent_from_samsung_smb_wrote():
|
||||||
msg_body = """Test reply
|
msg_body = """Test reply
|
||||||
@@ -106,6 +119,38 @@ On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> sent:
|
|||||||
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
|
def test_appointment():
|
||||||
|
msg_body = """Response
|
||||||
|
|
||||||
|
10/19/2017 @ 9:30 am for physical therapy
|
||||||
|
Bla
|
||||||
|
1517 4th Avenue Ste 300
|
||||||
|
London CA 19129, 555-421-6780
|
||||||
|
|
||||||
|
John Doe, FCLS
|
||||||
|
Mailgun Inc
|
||||||
|
555-941-0697
|
||||||
|
|
||||||
|
From: from@example.com [mailto:from@example.com]
|
||||||
|
Sent: Wednesday, October 18, 2017 2:05 PM
|
||||||
|
To: John Doer - SIU <jd@example.com>
|
||||||
|
Subject: RE: Claim # 5551188-1
|
||||||
|
|
||||||
|
Text"""
|
||||||
|
|
||||||
|
expected = """Response
|
||||||
|
|
||||||
|
10/19/2017 @ 9:30 am for physical therapy
|
||||||
|
Bla
|
||||||
|
1517 4th Avenue Ste 300
|
||||||
|
London CA 19129, 555-421-6780
|
||||||
|
|
||||||
|
John Doe, FCLS
|
||||||
|
Mailgun Inc
|
||||||
|
555-941-0697"""
|
||||||
|
eq_(expected, quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
def test_line_starts_with_on():
|
def test_line_starts_with_on():
|
||||||
msg_body = """Blah-blah-blah
|
msg_body = """Blah-blah-blah
|
||||||
On blah-blah-blah"""
|
On blah-blah-blah"""
|
||||||
@@ -142,7 +187,8 @@ def _check_pattern_original_message(original_message_indicator):
|
|||||||
-----{}-----
|
-----{}-----
|
||||||
|
|
||||||
Test"""
|
Test"""
|
||||||
eq_('Test reply', quotations.extract_from_plain(msg_body.format(six.text_type(original_message_indicator))))
|
eq_('Test reply', quotations.extract_from_plain(
|
||||||
|
msg_body.format(six.text_type(original_message_indicator))))
|
||||||
|
|
||||||
def test_english_original_message():
|
def test_english_original_message():
|
||||||
_check_pattern_original_message('Original Message')
|
_check_pattern_original_message('Original Message')
|
||||||
@@ -165,6 +211,17 @@ Test reply"""
|
|||||||
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
|
def test_android_wrote():
|
||||||
|
msg_body = """Test reply
|
||||||
|
|
||||||
|
---- John Smith wrote ----
|
||||||
|
|
||||||
|
> quoted
|
||||||
|
> text
|
||||||
|
"""
|
||||||
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
def test_reply_wraps_quotations():
|
def test_reply_wraps_quotations():
|
||||||
msg_body = """Test reply
|
msg_body = """Test reply
|
||||||
|
|
||||||
@@ -376,6 +433,14 @@ Op 17-feb.-2015, om 13:18 heeft Julius Caesar <pantheon@rome.com> het volgende g
|
|||||||
Small batch beard laboris tempor, non listicle hella Tumblr heirloom.
|
Small batch beard laboris tempor, non listicle hella Tumblr heirloom.
|
||||||
"""))
|
"""))
|
||||||
|
|
||||||
|
def test_vietnamese_from_block():
|
||||||
|
eq_('Hello', quotations.extract_from_plain(
|
||||||
|
u"""Hello
|
||||||
|
|
||||||
|
Vào 14:24 8 tháng 6, 2017, Hùng Nguyễn <hungnguyen@xxx.com> đã viết:
|
||||||
|
|
||||||
|
> Xin chào
|
||||||
|
"""))
|
||||||
|
|
||||||
def test_quotation_marker_false_positive():
|
def test_quotation_marker_false_positive():
|
||||||
msg_body = """Visit us now for assistance...
|
msg_body = """Visit us now for assistance...
|
||||||
@@ -388,6 +453,7 @@ def test_link_closed_with_quotation_marker_on_new_line():
|
|||||||
msg_body = '''8.45am-1pm
|
msg_body = '''8.45am-1pm
|
||||||
|
|
||||||
From: somebody@example.com
|
From: somebody@example.com
|
||||||
|
Date: Wed, 16 May 2012 00:15:02 -0600
|
||||||
|
|
||||||
<http://email.example.com/c/dHJhY2tpbmdfY29kZT1mMDdjYzBmNzM1ZjYzMGIxNT
|
<http://email.example.com/c/dHJhY2tpbmdfY29kZT1mMDdjYzBmNzM1ZjYzMGIxNT
|
||||||
> <bob@example.com <mailto:bob@example.com> >
|
> <bob@example.com <mailto:bob@example.com> >
|
||||||
@@ -429,7 +495,9 @@ def test_from_block_starts_with_date():
|
|||||||
msg_body = """Blah
|
msg_body = """Blah
|
||||||
|
|
||||||
Date: Wed, 16 May 2012 00:15:02 -0600
|
Date: Wed, 16 May 2012 00:15:02 -0600
|
||||||
To: klizhentas@example.com"""
|
To: klizhentas@example.com
|
||||||
|
|
||||||
|
"""
|
||||||
eq_('Blah', quotations.extract_from_plain(msg_body))
|
eq_('Blah', quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
@@ -499,11 +567,12 @@ def test_mark_message_lines():
|
|||||||
# next line should be marked as splitter
|
# next line should be marked as splitter
|
||||||
'_____________',
|
'_____________',
|
||||||
'From: foo@bar.com',
|
'From: foo@bar.com',
|
||||||
|
'Date: Wed, 16 May 2012 00:15:02 -0600',
|
||||||
'',
|
'',
|
||||||
'> Hi',
|
'> Hi',
|
||||||
'',
|
'',
|
||||||
'Signature']
|
'Signature']
|
||||||
eq_('tessemet', quotations.mark_message_lines(lines))
|
eq_('tesssemet', quotations.mark_message_lines(lines))
|
||||||
|
|
||||||
lines = ['Just testing the email reply',
|
lines = ['Just testing the email reply',
|
||||||
'',
|
'',
|
||||||
@@ -696,3 +765,77 @@ def test_standard_replies():
|
|||||||
"'%(reply)s' != %(stripped)s for %(fn)s" % \
|
"'%(reply)s' != %(stripped)s for %(fn)s" % \
|
||||||
{'reply': reply_text, 'stripped': stripped_text,
|
{'reply': reply_text, 'stripped': stripped_text,
|
||||||
'fn': filename}
|
'fn': filename}
|
||||||
|
|
||||||
|
|
||||||
|
def test_split_email():
|
||||||
|
msg = """From: Mr. X
|
||||||
|
Date: 24 February 2016
|
||||||
|
To: Mr. Y
|
||||||
|
Subject: Hi
|
||||||
|
Attachments: none
|
||||||
|
Goodbye.
|
||||||
|
From: Mr. Y
|
||||||
|
To: Mr. X
|
||||||
|
Date: 24 February 2016
|
||||||
|
Subject: Hi
|
||||||
|
Attachments: none
|
||||||
|
|
||||||
|
Hello.
|
||||||
|
|
||||||
|
On 24th February 2016 at 09.32am, Conal wrote:
|
||||||
|
|
||||||
|
Hey!
|
||||||
|
|
||||||
|
On Mon, 2016-10-03 at 09:45 -0600, Stangel, Dan wrote:
|
||||||
|
> Mohan,
|
||||||
|
>
|
||||||
|
> We have not yet migrated the systems.
|
||||||
|
>
|
||||||
|
> Dan
|
||||||
|
>
|
||||||
|
> > -----Original Message-----
|
||||||
|
> > Date: Mon, 2 Apr 2012 17:44:22 +0400
|
||||||
|
> > Subject: Test
|
||||||
|
> > From: bob@xxx.mailgun.org
|
||||||
|
> > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com
|
||||||
|
> >
|
||||||
|
> > Hi
|
||||||
|
> >
|
||||||
|
> > > From: bob@xxx.mailgun.org
|
||||||
|
> > > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com
|
||||||
|
> > > Date: Mon, 2 Apr 2012 17:44:22 +0400
|
||||||
|
> > > Subject: Test
|
||||||
|
> > > Hi
|
||||||
|
> > >
|
||||||
|
> >
|
||||||
|
>
|
||||||
|
>
|
||||||
|
"""
|
||||||
|
expected_markers = "stttttsttttetesetesmmmmmmsmmmmmmmmmmmmmmmm"
|
||||||
|
markers = quotations.split_emails(msg)
|
||||||
|
eq_(markers, expected_markers)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def test_feedback_below_left_unparsed():
|
||||||
|
msg_body = """Please enter your feedback below. Thank you.
|
||||||
|
|
||||||
|
------------------------------------- Enter Feedback Below -------------------------------------
|
||||||
|
|
||||||
|
The user experience was unparallelled. Please continue production. I'm sending payment to ensure
|
||||||
|
that this line is intact."""
|
||||||
|
|
||||||
|
parsed = quotations.extract_from_plain(msg_body)
|
||||||
|
eq_(msg_body, parsed)
|
||||||
|
|
||||||
|
|
||||||
|
def test_appointment_2():
|
||||||
|
msg_body = """Invitation for an interview:
|
||||||
|
|
||||||
|
Date: Wednesday 3, October 2011
|
||||||
|
Time: 7 : 00am
|
||||||
|
Address: 130 Fox St
|
||||||
|
|
||||||
|
Please bring in your ID."""
|
||||||
|
parsed = quotations.extract_from_plain(msg_body)
|
||||||
|
eq_(msg_body, parsed)
|
||||||
|
|||||||
@@ -1,12 +1,9 @@
|
|||||||
# coding:utf-8
|
# coding:utf-8
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from __future__ import absolute_import
|
||||||
from . import *
|
|
||||||
|
|
||||||
from talon import utils as u
|
from talon import utils as u
|
||||||
import cchardet
|
from . import *
|
||||||
import six
|
|
||||||
from lxml import html
|
|
||||||
|
|
||||||
|
|
||||||
def test_get_delimiter():
|
def test_get_delimiter():
|
||||||
@@ -15,54 +12,6 @@ def test_get_delimiter():
|
|||||||
eq_('\n', u.get_delimiter('abc'))
|
eq_('\n', u.get_delimiter('abc'))
|
||||||
|
|
||||||
|
|
||||||
def test_unicode():
|
|
||||||
eq_ (u'hi', u.to_unicode('hi'))
|
|
||||||
eq_ (type(u.to_unicode('hi')), six.text_type )
|
|
||||||
eq_ (type(u.to_unicode(u'hi')), six.text_type )
|
|
||||||
eq_ (type(u.to_unicode('привет')), six.text_type )
|
|
||||||
eq_ (type(u.to_unicode(u'привет')), six.text_type )
|
|
||||||
eq_ (u"привет", u.to_unicode('привет'))
|
|
||||||
eq_ (u"привет", u.to_unicode(u'привет'))
|
|
||||||
# some latin1 stuff
|
|
||||||
eq_ (u"Versión", u.to_unicode(u'Versi\xf3n'.encode('iso-8859-2'), precise=True))
|
|
||||||
|
|
||||||
|
|
||||||
def test_detect_encoding():
|
|
||||||
eq_ ('ascii', u.detect_encoding(b'qwe').lower())
|
|
||||||
eq_ ('iso-8859-2', u.detect_encoding(u'Versi\xf3n'.encode('iso-8859-2')).lower())
|
|
||||||
eq_ ('utf-8', u.detect_encoding(u'привет'.encode('utf8')).lower())
|
|
||||||
# fallback to utf-8
|
|
||||||
with patch.object(u.chardet, 'detect') as detect:
|
|
||||||
detect.side_effect = Exception
|
|
||||||
eq_ ('utf-8', u.detect_encoding('qwe'.encode('utf8')).lower())
|
|
||||||
|
|
||||||
|
|
||||||
def test_quick_detect_encoding():
|
|
||||||
eq_ ('ascii', u.quick_detect_encoding(b'qwe').lower())
|
|
||||||
eq_ ('windows-1252', u.quick_detect_encoding(u'Versi\xf3n'.encode('windows-1252')).lower())
|
|
||||||
eq_ ('utf-8', u.quick_detect_encoding(u'привет'.encode('utf8')).lower())
|
|
||||||
|
|
||||||
|
|
||||||
@patch.object(cchardet, 'detect')
|
|
||||||
@patch.object(u, 'detect_encoding')
|
|
||||||
def test_quick_detect_encoding_edge_cases(detect_encoding, cchardet_detect):
|
|
||||||
cchardet_detect.return_value = {'encoding': 'ascii'}
|
|
||||||
eq_('ascii', u.quick_detect_encoding(b"qwe"))
|
|
||||||
cchardet_detect.assert_called_once_with(b"qwe")
|
|
||||||
|
|
||||||
# fallback to detect_encoding
|
|
||||||
cchardet_detect.return_value = {}
|
|
||||||
detect_encoding.return_value = 'utf-8'
|
|
||||||
eq_('utf-8', u.quick_detect_encoding(b"qwe"))
|
|
||||||
|
|
||||||
# exception
|
|
||||||
detect_encoding.reset_mock()
|
|
||||||
cchardet_detect.side_effect = Exception()
|
|
||||||
detect_encoding.return_value = 'utf-8'
|
|
||||||
eq_('utf-8', u.quick_detect_encoding(b"qwe"))
|
|
||||||
ok_(detect_encoding.called)
|
|
||||||
|
|
||||||
|
|
||||||
def test_html_to_text():
|
def test_html_to_text():
|
||||||
html = """<body>
|
html = """<body>
|
||||||
<p>Hello world!</p>
|
<p>Hello world!</p>
|
||||||
@@ -76,11 +25,11 @@ Haha
|
|||||||
</p>
|
</p>
|
||||||
</body>"""
|
</body>"""
|
||||||
text = u.html_to_text(html)
|
text = u.html_to_text(html)
|
||||||
eq_(b"Hello world! \n\n * One! \n * Two \nHaha", text)
|
eq_("Hello world! \n\n * One! \n * Two \nHaha", text)
|
||||||
eq_(u"привет!", u.html_to_text("<b>привет!</b>").decode('utf8'))
|
eq_(u"привет!", u.html_to_text("<b>привет!</b>"))
|
||||||
|
|
||||||
html = '<body><br/><br/>Hi</body>'
|
html = '<body><br/><br/>Hi</body>'
|
||||||
eq_ (b'Hi', u.html_to_text(html))
|
eq_('Hi', u.html_to_text(html))
|
||||||
|
|
||||||
html = """Hi
|
html = """Hi
|
||||||
<style type="text/css">
|
<style type="text/css">
|
||||||
@@ -100,17 +49,23 @@ font: 13px 'Lucida Grande', Arial, sans-serif;
|
|||||||
|
|
||||||
}
|
}
|
||||||
</style>"""
|
</style>"""
|
||||||
eq_ (b'Hi', u.html_to_text(html))
|
eq_('Hi', u.html_to_text(html))
|
||||||
|
|
||||||
html = """<div>
|
html = """<div>
|
||||||
<!-- COMMENT 1 -->
|
<!-- COMMENT 1 -->
|
||||||
<span>TEXT 1</span>
|
<span>TEXT 1</span>
|
||||||
<p>TEXT 2 <!-- COMMENT 2 --></p>
|
<p>TEXT 2 <!-- COMMENT 2 --></p>
|
||||||
</div>"""
|
</div>"""
|
||||||
eq_(b'TEXT 1 \nTEXT 2', u.html_to_text(html))
|
eq_('TEXT 1 \nTEXT 2', u.html_to_text(html))
|
||||||
|
|
||||||
|
|
||||||
def test_comment_no_parent():
|
def test_comment_no_parent():
|
||||||
s = "<!-- COMMENT 1 --> no comment"
|
s = '<!-- COMMENT 1 --> no comment'
|
||||||
d = html.document_fromstring(s)
|
d = u.html_document_fromstring(s)
|
||||||
eq_("no comment", u.html_tree_to_text(d))
|
eq_("no comment", u.html_tree_to_text(d))
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(u, 'html_fromstring', Mock(return_value=None))
|
||||||
|
def test_bad_html_to_text():
|
||||||
|
bad_html = "one<br>two<br>three"
|
||||||
|
eq_(None, u.html_to_text(bad_html))
|
||||||
|
|||||||
Reference in New Issue
Block a user