Compare commits
102 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
061e549ad7 | ||
|
|
49d1a5d248 | ||
|
|
03d6b00db8 | ||
|
|
a2eb0f7201 | ||
|
|
5c71a0ca07 | ||
|
|
489d16fad9 | ||
|
|
a458707777 | ||
|
|
a1d0a86305 | ||
|
|
29f1d21be7 | ||
|
|
34c5b526c3 | ||
|
|
3edb6578ba | ||
|
|
984c036b6e | ||
|
|
a403ecb5c9 | ||
|
|
a44713409c | ||
|
|
567467b8ed | ||
|
|
139edd6104 | ||
|
|
e756d55abf | ||
|
|
015c8d2a78 | ||
|
|
5af846c13d | ||
|
|
e69a9c7a54 | ||
|
|
23cb2a9a53 | ||
|
|
b5e3397b88 | ||
|
|
5685a4055a | ||
|
|
97b72ef767 | ||
|
|
31489848be | ||
|
|
e5988d447b | ||
|
|
adfed748ce | ||
|
|
2444ba87c0 | ||
|
|
534457e713 | ||
|
|
ea82a9730e | ||
|
|
f04b872e14 | ||
|
|
e61894e425 | ||
|
|
35fbdaadac | ||
|
|
8441bc7328 | ||
|
|
37c95ff97b | ||
|
|
5b1ca33c57 | ||
|
|
ec8e09b34e | ||
|
|
bcf97eccfa | ||
|
|
f53b5cc7a6 | ||
|
|
27adde7aa7 | ||
|
|
a9719833e0 | ||
|
|
7bf37090ca | ||
|
|
44fcef7123 | ||
|
|
69a44b10a1 | ||
|
|
b085e3d049 | ||
|
|
4b953bcddc | ||
|
|
315eaa7080 | ||
|
|
5a9bc967f1 | ||
|
|
a0d7236d0b | ||
|
|
21e9a31ffe | ||
|
|
4ee46c0a97 | ||
|
|
10d9a930f9 | ||
|
|
a21ccdb21b | ||
|
|
7cdd7a8f35 | ||
|
|
01e03a47e0 | ||
|
|
1b9a71551a | ||
|
|
911efd1db4 | ||
|
|
e61f0a68c4 | ||
|
|
cefbcffd59 | ||
|
|
622a98d6d5 | ||
|
|
7901f5d1dc | ||
|
|
555c34d7a8 | ||
|
|
dcc0d1de20 | ||
|
|
7bdf4d622b | ||
|
|
4a7207b0d0 | ||
|
|
ad9c2ca0e8 | ||
|
|
da998ddb60 | ||
|
|
07f68815df | ||
|
|
35645f9ade | ||
|
|
7c3d91301c | ||
|
|
5bcf7403ad | ||
|
|
2d6c092b65 | ||
|
|
6d0689cad6 | ||
|
|
3f80e93ee0 | ||
|
|
1b18abab1d | ||
|
|
03dd5af5ab | ||
|
|
dfba82b07c | ||
|
|
08ca02c87f | ||
|
|
b61f4ec095 | ||
|
|
9dbe6a494b | ||
|
|
44e70939d6 | ||
|
|
ab6066eafa | ||
|
|
42258cdd36 | ||
|
|
d3de9e6893 | ||
|
|
333beb94af | ||
|
|
f3c0942c49 | ||
|
|
02adf53ab9 | ||
|
|
3497b5cab4 | ||
|
|
9c17dca17c | ||
|
|
de342d3177 | ||
|
|
743b452daf | ||
|
|
c762f3c337 | ||
|
|
31803d41bc | ||
|
|
2ecd9779fc | ||
|
|
5a7047233e | ||
|
|
999e9c3725 | ||
|
|
f6940fe878 | ||
|
|
ce65ff8fc8 | ||
|
|
eed6784f25 | ||
|
|
3d9ae356ea | ||
|
|
f688d074b5 | ||
|
|
41457d8fbd |
@@ -1,5 +1,3 @@
|
|||||||
recursive-include tests *
|
|
||||||
recursive-include talon *
|
|
||||||
recursive-exclude tests *.pyc *~
|
recursive-exclude tests *.pyc *~
|
||||||
recursive-exclude talon *.pyc *~
|
recursive-exclude talon *.pyc *~
|
||||||
include train.data
|
include train.data
|
||||||
|
|||||||
13
README.rst
13
README.rst
@@ -95,7 +95,7 @@ classifiers. The core of machine learning algorithm lays in
|
|||||||
apply to a message (``featurespace.py``), how data sets are built
|
apply to a message (``featurespace.py``), how data sets are built
|
||||||
(``dataset.py``), classifier’s interface (``classifier.py``).
|
(``dataset.py``), classifier’s interface (``classifier.py``).
|
||||||
|
|
||||||
The data used for training is taken from our personal email
|
Currently the data used for training is taken from our personal email
|
||||||
conversations and from `ENRON`_ dataset. As a result of applying our set
|
conversations and from `ENRON`_ dataset. As a result of applying our set
|
||||||
of features to the dataset we provide files ``classifier`` and
|
of features to the dataset we provide files ``classifier`` and
|
||||||
``train.data`` that don’t have any personal information but could be
|
``train.data`` that don’t have any personal information but could be
|
||||||
@@ -116,8 +116,19 @@ or
|
|||||||
from talon.signature.learning.classifier import train, init
|
from talon.signature.learning.classifier import train, init
|
||||||
train(init(), EXTRACTOR_DATA, EXTRACTOR_FILENAME)
|
train(init(), EXTRACTOR_DATA, EXTRACTOR_FILENAME)
|
||||||
|
|
||||||
|
Open-source Dataset
|
||||||
|
-------------------
|
||||||
|
|
||||||
|
Recently we started a `forge`_ project to create an open-source, annotated dataset of raw emails. In the project we
|
||||||
|
used a subset of `ENRON`_ data, cleansed of private, health and financial information by `EDRM`_. At the moment over 190
|
||||||
|
emails are annotated. Any contribution and collaboration on the project are welcome. Once the dataset is ready we plan to
|
||||||
|
start using it for talon.
|
||||||
|
|
||||||
.. _scikit-learn: http://scikit-learn.org
|
.. _scikit-learn: http://scikit-learn.org
|
||||||
.. _ENRON: https://www.cs.cmu.edu/~enron/
|
.. _ENRON: https://www.cs.cmu.edu/~enron/
|
||||||
|
.. _EDRM: http://www.edrm.net/resources/data-sets/edrm-enron-email-data-set
|
||||||
|
.. _forge: https://github.com/mailgun/forge
|
||||||
|
|
||||||
|
|
||||||
Research
|
Research
|
||||||
--------
|
--------
|
||||||
|
|||||||
39
setup.py
39
setup.py
@@ -1,8 +1,35 @@
|
|||||||
|
from __future__ import absolute_import
|
||||||
from setuptools import setup, find_packages
|
from setuptools import setup, find_packages
|
||||||
|
from setuptools.command.install import install
|
||||||
|
|
||||||
|
|
||||||
|
class InstallCommand(install):
|
||||||
|
user_options = install.user_options + [
|
||||||
|
('no-ml', None, "Don't install without Machine Learning modules."),
|
||||||
|
]
|
||||||
|
|
||||||
|
boolean_options = install.boolean_options + ['no-ml']
|
||||||
|
|
||||||
|
def initialize_options(self):
|
||||||
|
install.initialize_options(self)
|
||||||
|
self.no_ml = None
|
||||||
|
|
||||||
|
def finalize_options(self):
|
||||||
|
install.finalize_options(self)
|
||||||
|
if self.no_ml:
|
||||||
|
dist = self.distribution
|
||||||
|
dist.packages=find_packages(exclude=[
|
||||||
|
'tests',
|
||||||
|
'tests.*',
|
||||||
|
'talon.signature',
|
||||||
|
'talon.signature.*',
|
||||||
|
])
|
||||||
|
for not_required in ['numpy', 'scipy', 'scikit-learn==0.16.1']:
|
||||||
|
dist.install_requires.remove(not_required)
|
||||||
|
|
||||||
|
|
||||||
setup(name='talon',
|
setup(name='talon',
|
||||||
version='1.0.9',
|
version='1.3.4',
|
||||||
description=("Mailgun library "
|
description=("Mailgun library "
|
||||||
"to extract message quotations and signatures."),
|
"to extract message quotations and signatures."),
|
||||||
long_description=open("README.rst").read(),
|
long_description=open("README.rst").read(),
|
||||||
@@ -10,19 +37,23 @@ setup(name='talon',
|
|||||||
author_email='admin@mailgunhq.com',
|
author_email='admin@mailgunhq.com',
|
||||||
url='https://github.com/mailgun/talon',
|
url='https://github.com/mailgun/talon',
|
||||||
license='APACHE2',
|
license='APACHE2',
|
||||||
packages=find_packages(exclude=['tests']),
|
cmdclass={
|
||||||
|
'install': InstallCommand,
|
||||||
|
},
|
||||||
|
packages=find_packages(exclude=['tests', 'tests.*']),
|
||||||
include_package_data=True,
|
include_package_data=True,
|
||||||
zip_safe=True,
|
zip_safe=True,
|
||||||
install_requires=[
|
install_requires=[
|
||||||
"lxml>=2.3.3",
|
"lxml>=2.3.3",
|
||||||
"regex>=1",
|
"regex>=1",
|
||||||
"html2text",
|
|
||||||
"numpy",
|
"numpy",
|
||||||
"scipy",
|
"scipy",
|
||||||
"scikit-learn==0.16.1", # pickled versions of classifier, else rebuild
|
"scikit-learn==0.16.1", # pickled versions of classifier, else rebuild
|
||||||
'chardet>=1.0.1',
|
'chardet>=1.0.1',
|
||||||
'cchardet>=0.3.5',
|
'cchardet>=0.3.5',
|
||||||
'cssselect'
|
'cssselect',
|
||||||
|
'six>=1.10.0',
|
||||||
|
'html5lib'
|
||||||
],
|
],
|
||||||
tests_require=[
|
tests_require=[
|
||||||
"mock",
|
"mock",
|
||||||
|
|||||||
@@ -1,7 +1,13 @@
|
|||||||
|
from __future__ import absolute_import
|
||||||
from talon.quotations import register_xpath_extensions
|
from talon.quotations import register_xpath_extensions
|
||||||
from talon import signature
|
try:
|
||||||
|
from talon import signature
|
||||||
|
ML_ENABLED = True
|
||||||
|
except ImportError:
|
||||||
|
ML_ENABLED = False
|
||||||
|
|
||||||
|
|
||||||
def init():
|
def init():
|
||||||
register_xpath_extensions()
|
register_xpath_extensions()
|
||||||
signature.initialize()
|
if ML_ENABLED:
|
||||||
|
signature.initialize()
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
from __future__ import absolute_import
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -3,8 +3,10 @@ The module's functions operate on message bodies trying to extract original
|
|||||||
messages (without quoted messages) from html
|
messages (without quoted messages) from html
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
|
from talon.utils import cssselect
|
||||||
|
|
||||||
CHECKPOINT_PREFIX = '#!%!'
|
CHECKPOINT_PREFIX = '#!%!'
|
||||||
CHECKPOINT_SUFFIX = '!%!#'
|
CHECKPOINT_SUFFIX = '!%!#'
|
||||||
@@ -12,6 +14,7 @@ CHECKPOINT_PATTERN = re.compile(CHECKPOINT_PREFIX + '\d+' + CHECKPOINT_SUFFIX)
|
|||||||
|
|
||||||
# HTML quote indicators (tag ids)
|
# HTML quote indicators (tag ids)
|
||||||
QUOTE_IDS = ['OLK_SRC_BODY_SECTION']
|
QUOTE_IDS = ['OLK_SRC_BODY_SECTION']
|
||||||
|
RE_FWD = re.compile("^[-]+[ ]*Forwarded message[ ]*[-]+$", re.I | re.M)
|
||||||
|
|
||||||
|
|
||||||
def add_checkpoint(html_note, counter):
|
def add_checkpoint(html_note, counter):
|
||||||
@@ -76,8 +79,8 @@ def delete_quotation_tags(html_note, counter, quotation_checkpoints):
|
|||||||
|
|
||||||
def cut_gmail_quote(html_message):
|
def cut_gmail_quote(html_message):
|
||||||
''' Cuts the outermost block element with class gmail_quote. '''
|
''' Cuts the outermost block element with class gmail_quote. '''
|
||||||
gmail_quote = html_message.cssselect('.gmail_quote')
|
gmail_quote = cssselect('div.gmail_quote', html_message)
|
||||||
if gmail_quote:
|
if gmail_quote and (gmail_quote[0].text is None or not RE_FWD.match(gmail_quote[0].text)):
|
||||||
gmail_quote[0].getparent().remove(gmail_quote[0])
|
gmail_quote[0].getparent().remove(gmail_quote[0])
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@@ -85,9 +88,12 @@ def cut_gmail_quote(html_message):
|
|||||||
def cut_microsoft_quote(html_message):
|
def cut_microsoft_quote(html_message):
|
||||||
''' Cuts splitter block and all following blocks. '''
|
''' Cuts splitter block and all following blocks. '''
|
||||||
splitter = html_message.xpath(
|
splitter = html_message.xpath(
|
||||||
#outlook 2007, 2010
|
#outlook 2007, 2010 (international)
|
||||||
"//div[@style='border:none;border-top:solid #B5C4DF 1.0pt;"
|
"//div[@style='border:none;border-top:solid #B5C4DF 1.0pt;"
|
||||||
"padding:3.0pt 0cm 0cm 0cm']|"
|
"padding:3.0pt 0cm 0cm 0cm']|"
|
||||||
|
#outlook 2007, 2010 (american)
|
||||||
|
"//div[@style='border:none;border-top:solid #B5C4DF 1.0pt;"
|
||||||
|
"padding:3.0pt 0in 0in 0in']|"
|
||||||
#windows mail
|
#windows mail
|
||||||
"//div[@style='padding-top: 5px; "
|
"//div[@style='padding-top: 5px; "
|
||||||
"border-top-color: rgb(229, 229, 229); "
|
"border-top-color: rgb(229, 229, 229); "
|
||||||
@@ -130,7 +136,7 @@ def cut_microsoft_quote(html_message):
|
|||||||
def cut_by_id(html_message):
|
def cut_by_id(html_message):
|
||||||
found = False
|
found = False
|
||||||
for quote_id in QUOTE_IDS:
|
for quote_id in QUOTE_IDS:
|
||||||
quote = html_message.cssselect('#{}'.format(quote_id))
|
quote = cssselect('#{}'.format(quote_id), html_message)
|
||||||
if quote:
|
if quote:
|
||||||
found = True
|
found = True
|
||||||
quote[0].getparent().remove(quote[0])
|
quote[0].getparent().remove(quote[0])
|
||||||
@@ -138,8 +144,12 @@ def cut_by_id(html_message):
|
|||||||
|
|
||||||
|
|
||||||
def cut_blockquote(html_message):
|
def cut_blockquote(html_message):
|
||||||
''' Cuts the last non-nested blockquote with wrapping elements. '''
|
''' Cuts the last non-nested blockquote with wrapping elements.'''
|
||||||
quote = html_message.xpath('(.//blockquote)[not(ancestor::blockquote)][last()]')
|
quote = html_message.xpath(
|
||||||
|
'(.//blockquote)'
|
||||||
|
'[not(@class="gmail_quote") and not(ancestor::blockquote)]'
|
||||||
|
'[last()]')
|
||||||
|
|
||||||
if quote:
|
if quote:
|
||||||
quote = quote[0]
|
quote = quote[0]
|
||||||
quote.getparent().remove(quote)
|
quote.getparent().remove(quote)
|
||||||
@@ -155,21 +165,58 @@ def cut_from_block(html_message):
|
|||||||
|
|
||||||
if block:
|
if block:
|
||||||
block = block[-1]
|
block = block[-1]
|
||||||
|
parent_div = None
|
||||||
while block.getparent() is not None:
|
while block.getparent() is not None:
|
||||||
if block.tag == 'div':
|
if block.tag == 'div':
|
||||||
block.getparent().remove(block)
|
parent_div = block
|
||||||
|
break
|
||||||
|
block = block.getparent()
|
||||||
|
if parent_div is not None:
|
||||||
|
maybe_body = parent_div.getparent()
|
||||||
|
# In cases where removing this enclosing div will remove all
|
||||||
|
# content, we should assume the quote is not enclosed in a tag.
|
||||||
|
parent_div_is_all_content = (
|
||||||
|
maybe_body is not None and maybe_body.tag == 'body' and
|
||||||
|
len(maybe_body.getchildren()) == 1)
|
||||||
|
|
||||||
|
if not parent_div_is_all_content:
|
||||||
|
parent = block.getparent()
|
||||||
|
next_sibling = block.getnext()
|
||||||
|
|
||||||
|
# remove all tags after found From block
|
||||||
|
# (From block and quoted message are in separate divs)
|
||||||
|
while next_sibling is not None:
|
||||||
|
parent.remove(block)
|
||||||
|
block = next_sibling
|
||||||
|
next_sibling = block.getnext()
|
||||||
|
|
||||||
|
# remove the last sibling (or the
|
||||||
|
# From block if no siblings)
|
||||||
|
if block is not None:
|
||||||
|
parent.remove(block)
|
||||||
|
|
||||||
return True
|
return True
|
||||||
else:
|
else:
|
||||||
block = block.getparent()
|
return False
|
||||||
else:
|
|
||||||
# handle the case when From: block goes right after e.g. <hr>
|
# handle the case when From: block goes right after e.g. <hr>
|
||||||
# and not enclosed in some tag
|
# and not enclosed in some tag
|
||||||
block = html_message.xpath(
|
block = html_message.xpath(
|
||||||
("//*[starts-with(mg:tail(), 'From:')]|"
|
("//*[starts-with(mg:tail(), 'From:')]|"
|
||||||
"//*[starts-with(mg:tail(), 'Date:')]"))
|
"//*[starts-with(mg:tail(), 'Date:')]"))
|
||||||
if block:
|
if block:
|
||||||
block = block[0]
|
block = block[0]
|
||||||
while(block.getnext() is not None):
|
|
||||||
block.getparent().remove(block.getnext())
|
if RE_FWD.match(block.getparent().text or ''):
|
||||||
block.getparent().remove(block)
|
return False
|
||||||
return True
|
|
||||||
|
while(block.getnext() is not None):
|
||||||
|
block.getparent().remove(block.getnext())
|
||||||
|
block.getparent().remove(block)
|
||||||
|
return True
|
||||||
|
|
||||||
|
def cut_zimbra_quote(html_message):
|
||||||
|
zDivider = html_message.xpath('//hr[@data-marker="__DIVIDER__"]')
|
||||||
|
if zDivider:
|
||||||
|
zDivider[0].getparent().remove(zDivider[0])
|
||||||
|
return True
|
||||||
|
|||||||
@@ -5,15 +5,18 @@ The module's functions operate on message bodies trying to extract
|
|||||||
original messages (without quoted messages)
|
original messages (without quoted messages)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
import regex as re
|
import regex as re
|
||||||
import logging
|
import logging
|
||||||
from copy import deepcopy
|
from copy import deepcopy
|
||||||
|
|
||||||
from lxml import html, etree
|
from lxml import html, etree
|
||||||
import html2text
|
|
||||||
|
|
||||||
from talon.utils import get_delimiter
|
from talon.utils import (get_delimiter, html_tree_to_text,
|
||||||
|
html_document_fromstring)
|
||||||
from talon import html_quotations
|
from talon import html_quotations
|
||||||
|
from six.moves import range
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
log = logging.getLogger(__name__)
|
log = logging.getLogger(__name__)
|
||||||
@@ -108,7 +111,7 @@ RE_EMPTY_QUOTATION = re.compile(
|
|||||||
(
|
(
|
||||||
# quotation border: splitter line or a number of quotation marker lines
|
# quotation border: splitter line or a number of quotation marker lines
|
||||||
(?:
|
(?:
|
||||||
s
|
(?:se*)+
|
||||||
|
|
|
|
||||||
(?:me*){2,}
|
(?:me*){2,}
|
||||||
)
|
)
|
||||||
@@ -128,7 +131,7 @@ RE_ORIGINAL_MESSAGE = re.compile(u'[\s]*[-]+[ ]*({})[ ]*[-]+'.format(
|
|||||||
'Oprindelig meddelelse',
|
'Oprindelig meddelelse',
|
||||||
))), re.I)
|
))), re.I)
|
||||||
|
|
||||||
RE_FROM_COLON_OR_DATE_COLON = re.compile(u'(_+\r?\n)?[\s]*(:?[*]?{})[\s]?:[*]? .*'.format(
|
RE_FROM_COLON_OR_DATE_COLON = re.compile(u'(_+\r?\n)?[\s]*(:?[*]?{})[\s]?:[*]?.*'.format(
|
||||||
u'|'.join((
|
u'|'.join((
|
||||||
# "From" in different languages.
|
# "From" in different languages.
|
||||||
'From', 'Van', 'De', 'Von', 'Fra', u'Från',
|
'From', 'Van', 'De', 'Von', 'Fra', u'Från',
|
||||||
@@ -138,13 +141,20 @@ RE_FROM_COLON_OR_DATE_COLON = re.compile(u'(_+\r?\n)?[\s]*(:?[*]?{})[\s]?:[*]? .
|
|||||||
|
|
||||||
SPLITTER_PATTERNS = [
|
SPLITTER_PATTERNS = [
|
||||||
RE_ORIGINAL_MESSAGE,
|
RE_ORIGINAL_MESSAGE,
|
||||||
# <date> <person>
|
|
||||||
re.compile("(\d+/\d+/\d+|\d+\.\d+\.\d+).*@", re.VERBOSE),
|
|
||||||
RE_ON_DATE_SMB_WROTE,
|
RE_ON_DATE_SMB_WROTE,
|
||||||
RE_ON_DATE_WROTE_SMB,
|
RE_ON_DATE_WROTE_SMB,
|
||||||
RE_FROM_COLON_OR_DATE_COLON,
|
RE_FROM_COLON_OR_DATE_COLON,
|
||||||
|
# 02.04.2012 14:20 пользователь "bob@example.com" <
|
||||||
|
# bob@xxx.mailgun.org> написал:
|
||||||
|
re.compile("(\d+/\d+/\d+|\d+\.\d+\.\d+).*@", re.S),
|
||||||
|
# 2014-10-17 11:28 GMT+03:00 Bob <
|
||||||
|
# bob@example.com>:
|
||||||
|
re.compile("\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}\s+GMT.*@", re.S),
|
||||||
|
# Thu, 26 Jun 2014 14:00:51 +0400 Bob <bob@example.com>:
|
||||||
re.compile('\S{3,10}, \d\d? \S{3,10} 20\d\d,? \d\d?:\d\d(:\d\d)?'
|
re.compile('\S{3,10}, \d\d? \S{3,10} 20\d\d,? \d\d?:\d\d(:\d\d)?'
|
||||||
'( \S+){3,6}@\S+:')
|
'( \S+){3,6}@\S+:'),
|
||||||
|
# Sent from Samsung MobileName <address@example.com> wrote:
|
||||||
|
re.compile('Sent from Samsung .*@.*> wrote')
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
@@ -155,10 +165,16 @@ RE_PARENTHESIS_LINK = re.compile("\(https?://")
|
|||||||
|
|
||||||
SPLITTER_MAX_LINES = 4
|
SPLITTER_MAX_LINES = 4
|
||||||
MAX_LINES_COUNT = 1000
|
MAX_LINES_COUNT = 1000
|
||||||
|
# an extensive research shows that exceeding this limit
|
||||||
|
# leads to excessive processing time
|
||||||
|
MAX_HTML_LEN = 2794202
|
||||||
|
|
||||||
QUOT_PATTERN = re.compile('^>+ ?')
|
QUOT_PATTERN = re.compile('^>+ ?')
|
||||||
NO_QUOT_LINE = re.compile('^[^>].*[\S].*')
|
NO_QUOT_LINE = re.compile('^[^>].*[\S].*')
|
||||||
|
|
||||||
|
# Regular expression to identify if a line is a header.
|
||||||
|
RE_HEADER = re.compile(": ")
|
||||||
|
|
||||||
|
|
||||||
def extract_from(msg_body, content_type='text/plain'):
|
def extract_from(msg_body, content_type='text/plain'):
|
||||||
try:
|
try:
|
||||||
@@ -172,6 +188,19 @@ def extract_from(msg_body, content_type='text/plain'):
|
|||||||
return msg_body
|
return msg_body
|
||||||
|
|
||||||
|
|
||||||
|
def remove_initial_spaces_and_mark_message_lines(lines):
|
||||||
|
"""
|
||||||
|
Removes the initial spaces in each line before marking message lines.
|
||||||
|
|
||||||
|
This ensures headers can be identified if they are indented with spaces.
|
||||||
|
"""
|
||||||
|
i = 0
|
||||||
|
while i < len(lines):
|
||||||
|
lines[i] = lines[i].lstrip(' ')
|
||||||
|
i += 1
|
||||||
|
return mark_message_lines(lines)
|
||||||
|
|
||||||
|
|
||||||
def mark_message_lines(lines):
|
def mark_message_lines(lines):
|
||||||
"""Mark message lines with markers to distinguish quotation lines.
|
"""Mark message lines with markers to distinguish quotation lines.
|
||||||
|
|
||||||
@@ -185,7 +214,7 @@ def mark_message_lines(lines):
|
|||||||
>>> mark_message_lines(['answer', 'From: foo@bar.com', '', '> question'])
|
>>> mark_message_lines(['answer', 'From: foo@bar.com', '', '> question'])
|
||||||
'tsem'
|
'tsem'
|
||||||
"""
|
"""
|
||||||
markers = bytearray(len(lines))
|
markers = ['e' for _ in lines]
|
||||||
i = 0
|
i = 0
|
||||||
while i < len(lines):
|
while i < len(lines):
|
||||||
if not lines[i].strip():
|
if not lines[i].strip():
|
||||||
@@ -201,7 +230,7 @@ def mark_message_lines(lines):
|
|||||||
if splitter:
|
if splitter:
|
||||||
# append as many splitter markers as lines in splitter
|
# append as many splitter markers as lines in splitter
|
||||||
splitter_lines = splitter.group().splitlines()
|
splitter_lines = splitter.group().splitlines()
|
||||||
for j in xrange(len(splitter_lines)):
|
for j in range(len(splitter_lines)):
|
||||||
markers[i + j] = 's'
|
markers[i + j] = 's'
|
||||||
|
|
||||||
# skip splitter lines
|
# skip splitter lines
|
||||||
@@ -211,7 +240,7 @@ def mark_message_lines(lines):
|
|||||||
markers[i] = 't'
|
markers[i] = 't'
|
||||||
i += 1
|
i += 1
|
||||||
|
|
||||||
return markers
|
return ''.join(markers)
|
||||||
|
|
||||||
|
|
||||||
def process_marked_lines(lines, markers, return_flags=[False, -1, -1]):
|
def process_marked_lines(lines, markers, return_flags=[False, -1, -1]):
|
||||||
@@ -225,6 +254,7 @@ def process_marked_lines(lines, markers, return_flags=[False, -1, -1]):
|
|||||||
return_flags = [were_lines_deleted, first_deleted_line,
|
return_flags = [were_lines_deleted, first_deleted_line,
|
||||||
last_deleted_line]
|
last_deleted_line]
|
||||||
"""
|
"""
|
||||||
|
markers = ''.join(markers)
|
||||||
# if there are no splitter there should be no markers
|
# if there are no splitter there should be no markers
|
||||||
if 's' not in markers and not re.search('(me*){3}', markers):
|
if 's' not in markers and not re.search('(me*){3}', markers):
|
||||||
markers = markers.replace('m', 't')
|
markers = markers.replace('m', 't')
|
||||||
@@ -270,10 +300,27 @@ def preprocess(msg_body, delimiter, content_type='text/plain'):
|
|||||||
Replaces link brackets so that they couldn't be taken for quotation marker.
|
Replaces link brackets so that they couldn't be taken for quotation marker.
|
||||||
Splits line in two if splitter pattern preceded by some text on the same
|
Splits line in two if splitter pattern preceded by some text on the same
|
||||||
line (done only for 'On <date> <person> wrote:' pattern).
|
line (done only for 'On <date> <person> wrote:' pattern).
|
||||||
|
|
||||||
|
Converts msg_body into a unicode.
|
||||||
"""
|
"""
|
||||||
# normalize links i.e. replace '<', '>' wrapping the link with some symbols
|
msg_body = _replace_link_brackets(msg_body)
|
||||||
# so that '>' closing the link couldn't be mistakenly taken for quotation
|
|
||||||
# marker.
|
msg_body = _wrap_splitter_with_newline(msg_body, delimiter, content_type)
|
||||||
|
|
||||||
|
return msg_body
|
||||||
|
|
||||||
|
|
||||||
|
def _replace_link_brackets(msg_body):
|
||||||
|
"""
|
||||||
|
Normalize links i.e. replace '<', '>' wrapping the link with some symbols
|
||||||
|
so that '>' closing the link couldn't be mistakenly taken for quotation
|
||||||
|
marker.
|
||||||
|
|
||||||
|
Converts msg_body into a unicode
|
||||||
|
"""
|
||||||
|
if isinstance(msg_body, bytes):
|
||||||
|
msg_body = msg_body.decode('utf8')
|
||||||
|
|
||||||
def link_wrapper(link):
|
def link_wrapper(link):
|
||||||
newline_index = msg_body[:link.start()].rfind("\n")
|
newline_index = msg_body[:link.start()].rfind("\n")
|
||||||
if msg_body[newline_index + 1] == ">":
|
if msg_body[newline_index + 1] == ">":
|
||||||
@@ -282,7 +329,14 @@ def preprocess(msg_body, delimiter, content_type='text/plain'):
|
|||||||
return "@@%s@@" % link.group(1)
|
return "@@%s@@" % link.group(1)
|
||||||
|
|
||||||
msg_body = re.sub(RE_LINK, link_wrapper, msg_body)
|
msg_body = re.sub(RE_LINK, link_wrapper, msg_body)
|
||||||
|
return msg_body
|
||||||
|
|
||||||
|
|
||||||
|
def _wrap_splitter_with_newline(msg_body, delimiter, content_type='text/plain'):
|
||||||
|
"""
|
||||||
|
Splits line in two if splitter pattern preceded by some text on the same
|
||||||
|
line (done only for 'On <date> <person> wrote:' pattern.
|
||||||
|
"""
|
||||||
def splitter_wrapper(splitter):
|
def splitter_wrapper(splitter):
|
||||||
"""Wraps splitter with new line"""
|
"""Wraps splitter with new line"""
|
||||||
if splitter.start() and msg_body[splitter.start() - 1] != '\n':
|
if splitter.start() and msg_body[splitter.start() - 1] != '\n':
|
||||||
@@ -321,7 +375,37 @@ def extract_from_plain(msg_body):
|
|||||||
return msg_body
|
return msg_body
|
||||||
|
|
||||||
|
|
||||||
def extract_from_html(s):
|
def extract_from_html(msg_body):
|
||||||
|
"""
|
||||||
|
Extract not quoted message from provided html message body
|
||||||
|
using tags and plain text algorithm.
|
||||||
|
|
||||||
|
Cut out the 'blockquote', 'gmail_quote' tags.
|
||||||
|
Cut Microsoft quotations.
|
||||||
|
|
||||||
|
Then use plain text algorithm to cut out splitter or
|
||||||
|
leftover quotation.
|
||||||
|
This works by adding checkpoint text to all html tags,
|
||||||
|
then converting html to text,
|
||||||
|
then extracting quotations from text,
|
||||||
|
then checking deleted checkpoints,
|
||||||
|
then deleting necessary tags.
|
||||||
|
|
||||||
|
Returns a unicode string.
|
||||||
|
"""
|
||||||
|
if isinstance(msg_body, six.text_type):
|
||||||
|
msg_body = msg_body.encode('utf8')
|
||||||
|
elif not isinstance(msg_body, bytes):
|
||||||
|
msg_body = msg_body.encode('ascii')
|
||||||
|
|
||||||
|
result = _extract_from_html(msg_body)
|
||||||
|
if isinstance(result, bytes):
|
||||||
|
result = result.decode('utf8')
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_from_html(msg_body):
|
||||||
"""
|
"""
|
||||||
Extract not quoted message from provided html message body
|
Extract not quoted message from provided html message body
|
||||||
using tags and plain text algorithm.
|
using tags and plain text algorithm.
|
||||||
@@ -337,49 +421,33 @@ def extract_from_html(s):
|
|||||||
then checking deleted checkpoints,
|
then checking deleted checkpoints,
|
||||||
then deleting necessary tags.
|
then deleting necessary tags.
|
||||||
"""
|
"""
|
||||||
|
if msg_body.strip() == b'':
|
||||||
|
return msg_body
|
||||||
|
|
||||||
if s.strip() == '':
|
msg_body = msg_body.replace(b'\r\n', b'\n')
|
||||||
return s
|
html_tree = html_document_fromstring(msg_body)
|
||||||
|
|
||||||
# replace CRLF with LF temporaraly otherwise CR will be converted to ' '
|
if html_tree is None:
|
||||||
# when doing deepcopy on html tree
|
return msg_body
|
||||||
msg_body, replaced = _CRLF_to_LF(s)
|
|
||||||
|
|
||||||
html_tree = html.document_fromstring(
|
|
||||||
msg_body,
|
|
||||||
parser=html.HTMLParser(encoding="utf-8")
|
|
||||||
)
|
|
||||||
|
|
||||||
cut_quotations = (html_quotations.cut_gmail_quote(html_tree) or
|
cut_quotations = (html_quotations.cut_gmail_quote(html_tree) or
|
||||||
|
html_quotations.cut_zimbra_quote(html_tree) or
|
||||||
html_quotations.cut_blockquote(html_tree) or
|
html_quotations.cut_blockquote(html_tree) or
|
||||||
html_quotations.cut_microsoft_quote(html_tree) or
|
html_quotations.cut_microsoft_quote(html_tree) or
|
||||||
html_quotations.cut_by_id(html_tree) or
|
html_quotations.cut_by_id(html_tree) or
|
||||||
html_quotations.cut_from_block(html_tree)
|
html_quotations.cut_from_block(html_tree)
|
||||||
)
|
)
|
||||||
|
|
||||||
html_tree_copy = deepcopy(html_tree)
|
html_tree_copy = deepcopy(html_tree)
|
||||||
|
|
||||||
number_of_checkpoints = html_quotations.add_checkpoint(html_tree, 0)
|
number_of_checkpoints = html_quotations.add_checkpoint(html_tree, 0)
|
||||||
quotation_checkpoints = [False] * number_of_checkpoints
|
quotation_checkpoints = [False] * number_of_checkpoints
|
||||||
msg_with_checkpoints = html.tostring(html_tree)
|
plain_text = html_tree_to_text(html_tree)
|
||||||
|
|
||||||
h = html2text.HTML2Text()
|
|
||||||
h.body_width = 0 # generate plain text without wrap
|
|
||||||
|
|
||||||
# html2text adds unnecessary star symbols. Remove them.
|
|
||||||
# Mask star symbols
|
|
||||||
msg_with_checkpoints = msg_with_checkpoints.replace('*', '3423oorkg432')
|
|
||||||
plain_text = h.handle(msg_with_checkpoints)
|
|
||||||
# Remove created star symbols
|
|
||||||
plain_text = plain_text.replace('*', '')
|
|
||||||
# Unmask saved star symbols
|
|
||||||
plain_text = plain_text.replace('3423oorkg432', '*')
|
|
||||||
plain_text = preprocess(plain_text, '\n', content_type='text/html')
|
plain_text = preprocess(plain_text, '\n', content_type='text/html')
|
||||||
lines = plain_text.splitlines()
|
lines = plain_text.splitlines()
|
||||||
|
|
||||||
# Don't process too long messages
|
# Don't process too long messages
|
||||||
if len(lines) > MAX_LINES_COUNT:
|
if len(lines) > MAX_LINES_COUNT:
|
||||||
return s
|
return msg_body
|
||||||
|
|
||||||
# Collect checkpoints on each line
|
# Collect checkpoints on each line
|
||||||
line_checkpoints = [
|
line_checkpoints = [
|
||||||
@@ -397,25 +465,106 @@ def extract_from_html(s):
|
|||||||
process_marked_lines(lines, markers, return_flags)
|
process_marked_lines(lines, markers, return_flags)
|
||||||
lines_were_deleted, first_deleted, last_deleted = return_flags
|
lines_were_deleted, first_deleted, last_deleted = return_flags
|
||||||
|
|
||||||
|
if not lines_were_deleted and not cut_quotations:
|
||||||
|
return msg_body
|
||||||
|
|
||||||
if lines_were_deleted:
|
if lines_were_deleted:
|
||||||
#collect checkpoints from deleted lines
|
#collect checkpoints from deleted lines
|
||||||
for i in xrange(first_deleted, last_deleted):
|
for i in range(first_deleted, last_deleted):
|
||||||
for checkpoint in line_checkpoints[i]:
|
for checkpoint in line_checkpoints[i]:
|
||||||
quotation_checkpoints[checkpoint] = True
|
quotation_checkpoints[checkpoint] = True
|
||||||
else:
|
|
||||||
if cut_quotations:
|
|
||||||
return _restore_CRLF(html.tostring(html_tree_copy), replaced)
|
|
||||||
else:
|
|
||||||
return s
|
|
||||||
|
|
||||||
# Remove tags with quotation checkpoints
|
# Remove tags with quotation checkpoints
|
||||||
html_quotations.delete_quotation_tags(
|
html_quotations.delete_quotation_tags(
|
||||||
html_tree_copy, 0, quotation_checkpoints
|
html_tree_copy, 0, quotation_checkpoints
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if _readable_text_empty(html_tree_copy):
|
||||||
|
return msg_body
|
||||||
|
|
||||||
return html.tostring(html_tree_copy)
|
return html.tostring(html_tree_copy)
|
||||||
|
|
||||||
|
|
||||||
|
def split_emails(msg):
|
||||||
|
"""
|
||||||
|
Given a message (which may consist of an email conversation thread with
|
||||||
|
multiple emails), mark the lines to identify split lines, content lines and
|
||||||
|
empty lines.
|
||||||
|
|
||||||
|
Correct the split line markers inside header blocks. Header blocks are
|
||||||
|
identified by the regular expression RE_HEADER.
|
||||||
|
|
||||||
|
Return the corrected markers
|
||||||
|
"""
|
||||||
|
msg_body = _replace_link_brackets(msg)
|
||||||
|
|
||||||
|
# don't process too long messages
|
||||||
|
lines = msg_body.splitlines()[:MAX_LINES_COUNT]
|
||||||
|
markers = remove_initial_spaces_and_mark_message_lines(lines)
|
||||||
|
|
||||||
|
markers = _mark_quoted_email_splitlines(markers, lines)
|
||||||
|
|
||||||
|
# we don't want splitlines in header blocks
|
||||||
|
markers = _correct_splitlines_in_headers(markers, lines)
|
||||||
|
|
||||||
|
return markers
|
||||||
|
|
||||||
|
|
||||||
|
def _mark_quoted_email_splitlines(markers, lines):
|
||||||
|
"""
|
||||||
|
When there are headers indented with '>' characters, this method will
|
||||||
|
attempt to identify if the header is a splitline header. If it is, then we
|
||||||
|
mark it with 's' instead of leaving it as 'm' and return the new markers.
|
||||||
|
"""
|
||||||
|
# Create a list of markers to easily alter specific characters
|
||||||
|
markerlist = list(markers)
|
||||||
|
for i, line in enumerate(lines):
|
||||||
|
if markerlist[i] != 'm':
|
||||||
|
continue
|
||||||
|
for pattern in SPLITTER_PATTERNS:
|
||||||
|
matcher = re.search(pattern, line)
|
||||||
|
if matcher:
|
||||||
|
markerlist[i] = 's'
|
||||||
|
break
|
||||||
|
|
||||||
|
return "".join(markerlist)
|
||||||
|
|
||||||
|
|
||||||
|
def _correct_splitlines_in_headers(markers, lines):
    """Remove split markers that fall inside a header block.

    Walks the markers in lockstep with the lines, tracking whether we are
    currently inside a header block (lines matching RE_HEADER).
    """
    corrected = []
    in_header_block = False

    for idx, marker in enumerate(markers):
        line = lines[idx]
        is_header_line = bool(re.search(RE_HEADER, line))

        # An 's' either opens a header block (first header line seen) or,
        # when already inside a block, is demoted: quoted lines back to 'm',
        # anything else to 't'.
        if marker == 's':
            if not in_header_block:
                if is_header_line:
                    in_header_block = True
            elif QUOT_PATTERN.match(line):
                marker = 'm'
            else:
                marker = 't'

        # Any non-header line terminates the current header block.
        if not is_header_line:
            in_header_block = False

        corrected.append(marker)

    return "".join(corrected)
|
||||||
|
|
||||||
|
|
||||||
|
def _readable_text_empty(html_tree):
    """Return True when the tree renders to no visible (non-whitespace) text."""
    text = html_tree_to_text(html_tree)
    return not text.strip()
|
||||||
|
|
||||||
|
|
||||||
def is_splitter(line):
|
def is_splitter(line):
|
||||||
'''
|
'''
|
||||||
Returns Matcher object if provided string is a splitter and
|
Returns Matcher object if provided string is a splitter and
|
||||||
@@ -429,7 +578,7 @@ def is_splitter(line):
|
|||||||
|
|
||||||
def text_content(context):
    '''XPath Extension function to return a node text content.'''
    node = context.context_node
    return node.xpath("string()").strip()
|
||||||
|
|
||||||
|
|
||||||
def tail(context):
|
def tail(context):
|
||||||
@@ -442,37 +591,3 @@ def register_xpath_extensions():
|
|||||||
ns.prefix = 'mg'
|
ns.prefix = 'mg'
|
||||||
ns['text_content'] = text_content
|
ns['text_content'] = text_content
|
||||||
ns['tail'] = tail
|
ns['tail'] = tail
|
||||||
|
|
||||||
|
|
||||||
def _restore_CRLF(s, replaced=True):
|
|
||||||
"""Restore CRLF if previously CRLF was replaced with LF
|
|
||||||
|
|
||||||
>>> _restore_CRLF('a\nb')
|
|
||||||
'a\r\nb'
|
|
||||||
>>> _restore_CRLF('a\nb', replaced=False)
|
|
||||||
'a\nb'
|
|
||||||
"""
|
|
||||||
if replaced:
|
|
||||||
return s.replace('\n', '\r\n')
|
|
||||||
return s
|
|
||||||
|
|
||||||
|
|
||||||
def _CRLF_to_LF(s):
    """Replace CRLF with LF.

    Returns a ``(text, changed)`` pair; ``changed`` tells whether a
    replacement actually took place.

    >>> _CRLF_to_LF('a\\r\\nb')
    ('a\\nb', True)
    >>> _CRLF_to_LF('a\\nb')
    ('a\\nb', False)
    """
    delimiter = get_delimiter(s)
    if delimiter != '\r\n':
        return s, False
    return s.replace(delimiter, '\n'), True
|
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ trained against, don't forget to regenerate:
|
|||||||
* signature/data/classifier
|
* signature/data/classifier
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from . import extraction
|
from . import extraction
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
from __future__ import absolute_import
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
import regex as re
|
import regex as re
|
||||||
@@ -111,7 +112,7 @@ def extract_signature(msg_body):
|
|||||||
|
|
||||||
return (stripped_body.strip(),
|
return (stripped_body.strip(),
|
||||||
signature.strip())
|
signature.strip())
|
||||||
except Exception, e:
|
except Exception as e:
|
||||||
log.exception('ERROR extracting signature')
|
log.exception('ERROR extracting signature')
|
||||||
return (msg_body, None)
|
return (msg_body, None)
|
||||||
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ The classifier could be used to detect if a certain line of the message
|
|||||||
body belongs to the signature.
|
body belongs to the signature.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from numpy import genfromtxt
|
from numpy import genfromtxt
|
||||||
from sklearn.svm import LinearSVC
|
from sklearn.svm import LinearSVC
|
||||||
from sklearn.externals import joblib
|
from sklearn.externals import joblib
|
||||||
|
|||||||
@@ -16,11 +16,13 @@ suffix and the corresponding sender file has the same name except for the
|
|||||||
suffix which should be `_sender`.
|
suffix which should be `_sender`.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
import os
|
import os
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
from talon.signature.constants import SIGNATURE_MAX_LINES
|
from talon.signature.constants import SIGNATURE_MAX_LINES
|
||||||
from talon.signature.learning.featurespace import build_pattern, features
|
from talon.signature.learning.featurespace import build_pattern, features
|
||||||
|
from six.moves import range
|
||||||
|
|
||||||
|
|
||||||
SENDER_SUFFIX = '_sender'
|
SENDER_SUFFIX = '_sender'
|
||||||
@@ -144,7 +146,7 @@ def build_extraction_dataset(folder, dataset_filename,
|
|||||||
if not sender or not msg:
|
if not sender or not msg:
|
||||||
continue
|
continue
|
||||||
lines = msg.splitlines()
|
lines = msg.splitlines()
|
||||||
for i in xrange(1, min(SIGNATURE_MAX_LINES,
|
for i in range(1, min(SIGNATURE_MAX_LINES,
|
||||||
len(lines)) + 1):
|
len(lines)) + 1):
|
||||||
line = lines[-i]
|
line = lines[-i]
|
||||||
label = -1
|
label = -1
|
||||||
|
|||||||
@@ -7,9 +7,12 @@ The body and the message sender string are converted into unicode before
|
|||||||
applying features to them.
|
applying features to them.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from talon.signature.constants import (SIGNATURE_MAX_LINES,
|
from talon.signature.constants import (SIGNATURE_MAX_LINES,
|
||||||
TOO_LONG_SIGNATURE_LINE)
|
TOO_LONG_SIGNATURE_LINE)
|
||||||
from talon.signature.learning.helpers import *
|
from talon.signature.learning.helpers import *
|
||||||
|
from six.moves import zip
|
||||||
|
from functools import reduce
|
||||||
|
|
||||||
|
|
||||||
def features(sender=''):
|
def features(sender=''):
|
||||||
|
|||||||
@@ -6,6 +6,7 @@
|
|||||||
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
import unicodedata
|
import unicodedata
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
@@ -184,12 +185,13 @@ def capitalized_words_percent(s):
|
|||||||
s = to_unicode(s, precise=True)
|
s = to_unicode(s, precise=True)
|
||||||
words = re.split('\s', s)
|
words = re.split('\s', s)
|
||||||
words = [w for w in words if w.strip()]
|
words = [w for w in words if w.strip()]
|
||||||
|
words = [w for w in words if len(w) > 2]
|
||||||
capitalized_words_counter = 0
|
capitalized_words_counter = 0
|
||||||
valid_words_counter = 0
|
valid_words_counter = 0
|
||||||
for word in words:
|
for word in words:
|
||||||
if not INVALID_WORD_START.match(word):
|
if not INVALID_WORD_START.match(word):
|
||||||
valid_words_counter += 1
|
valid_words_counter += 1
|
||||||
if word[0].isupper():
|
if word[0].isupper() and not word[1].isupper():
|
||||||
capitalized_words_counter += 1
|
capitalized_words_counter += 1
|
||||||
if valid_words_counter > 0 and len(words) > 1:
|
if valid_words_counter > 0 and len(words) > 1:
|
||||||
return 100 * float(capitalized_words_counter) / valid_words_counter
|
return 100 * float(capitalized_words_counter) / valid_words_counter
|
||||||
|
|||||||
170
talon/utils.py
170
talon/utils.py
@@ -1,11 +1,19 @@
|
|||||||
# coding:utf-8
|
# coding:utf-8
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
import logging
|
import logging
|
||||||
from random import shuffle
|
from random import shuffle
|
||||||
import chardet
|
import chardet
|
||||||
import cchardet
|
import cchardet
|
||||||
|
import regex as re
|
||||||
|
|
||||||
|
from lxml.html import html5parser
|
||||||
|
from lxml.cssselect import CSSSelector
|
||||||
|
|
||||||
|
import html5lib
|
||||||
|
|
||||||
from talon.constants import RE_DELIMITER
|
from talon.constants import RE_DELIMITER
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
def safe_format(format_string, *args, **kwargs):
|
def safe_format(format_string, *args, **kwargs):
|
||||||
@@ -24,7 +32,7 @@ def safe_format(format_string, *args, **kwargs):
|
|||||||
except (UnicodeEncodeError, UnicodeDecodeError):
|
except (UnicodeEncodeError, UnicodeDecodeError):
|
||||||
format_string = to_utf8(format_string)
|
format_string = to_utf8(format_string)
|
||||||
args = [to_utf8(p) for p in args]
|
args = [to_utf8(p) for p in args]
|
||||||
kwargs = {k: to_utf8(v) for k, v in kwargs.iteritems()}
|
kwargs = {k: to_utf8(v) for k, v in six.iteritems(kwargs)}
|
||||||
return format_string.format(*args, **kwargs)
|
return format_string.format(*args, **kwargs)
|
||||||
|
|
||||||
# ignore other errors
|
# ignore other errors
|
||||||
@@ -41,9 +49,9 @@ def to_unicode(str_or_unicode, precise=False):
|
|||||||
u'привет'
|
u'привет'
|
||||||
If `precise` flag is True, tries to guess the correct encoding first.
|
If `precise` flag is True, tries to guess the correct encoding first.
|
||||||
"""
|
"""
|
||||||
encoding = quick_detect_encoding(str_or_unicode) if precise else 'utf-8'
|
if not isinstance(str_or_unicode, six.text_type):
|
||||||
if isinstance(str_or_unicode, str):
|
encoding = quick_detect_encoding(str_or_unicode) if precise else 'utf-8'
|
||||||
return unicode(str_or_unicode, encoding, 'replace')
|
return six.text_type(str_or_unicode, encoding, 'replace')
|
||||||
return str_or_unicode
|
return str_or_unicode
|
||||||
|
|
||||||
|
|
||||||
@@ -53,12 +61,12 @@ def detect_encoding(string):
|
|||||||
|
|
||||||
Defaults to UTF-8.
|
Defaults to UTF-8.
|
||||||
"""
|
"""
|
||||||
|
assert isinstance(string, bytes)
|
||||||
try:
|
try:
|
||||||
detected = chardet.detect(string)
|
detected = chardet.detect(string)
|
||||||
if detected:
|
if detected:
|
||||||
return detected.get('encoding') or 'utf-8'
|
return detected.get('encoding') or 'utf-8'
|
||||||
except Exception, e:
|
except Exception as e:
|
||||||
print 11111111111, e
|
|
||||||
pass
|
pass
|
||||||
return 'utf-8'
|
return 'utf-8'
|
||||||
|
|
||||||
@@ -69,12 +77,12 @@ def quick_detect_encoding(string):
|
|||||||
|
|
||||||
Uses cchardet. Fallbacks to detect_encoding.
|
Uses cchardet. Fallbacks to detect_encoding.
|
||||||
"""
|
"""
|
||||||
|
assert isinstance(string, bytes)
|
||||||
try:
|
try:
|
||||||
detected = cchardet.detect(string)
|
detected = cchardet.detect(string)
|
||||||
if detected:
|
if detected:
|
||||||
return detected.get('encoding') or detect_encoding(string)
|
return detected.get('encoding') or detect_encoding(string)
|
||||||
except Exception, e:
|
except Exception as e:
|
||||||
print 222222222222, e
|
|
||||||
pass
|
pass
|
||||||
return detect_encoding(string)
|
return detect_encoding(string)
|
||||||
|
|
||||||
@@ -85,7 +93,7 @@ def to_utf8(str_or_unicode):
|
|||||||
>>> utils.to_utf8(u'hi')
|
>>> utils.to_utf8(u'hi')
|
||||||
'hi'
|
'hi'
|
||||||
"""
|
"""
|
||||||
if isinstance(str_or_unicode, unicode):
|
if not isinstance(str_or_unicode, six.text_type):
|
||||||
return str_or_unicode.encode("utf-8", "ignore")
|
return str_or_unicode.encode("utf-8", "ignore")
|
||||||
return str(str_or_unicode)
|
return str(str_or_unicode)
|
||||||
|
|
||||||
@@ -105,3 +113,147 @@ def get_delimiter(msg_body):
|
|||||||
delimiter = '\n'
|
delimiter = '\n'
|
||||||
|
|
||||||
return delimiter
|
return delimiter
|
||||||
|
|
||||||
|
|
||||||
|
def html_tree_to_text(tree):
    """Render an lxml HTML tree to plain text (utf-8 encoded str)."""
    # <style> bodies are CSS, not content — drop them first.
    for style_el in CSSSelector('style')(tree):
        style_el.getparent().remove(style_el)

    # Drop comment nodes; a parentless comment contributes no text anyway.
    for comment in tree.xpath('//comment()'):
        parent = comment.getparent()
        if parent is not None:
            parent.remove(comment)

    text = ""
    for node in tree.iter():
        node_text = (node.text or '') + (node.tail or '')
        if len(node_text) > 1:
            if node.tag in _BLOCKTAGS:
                text += "\n"
            if node.tag == 'li':
                text += " * "
            text += node_text.strip() + " "

            # Surface links inline in the produced text.
            href = node.attrib.get('href')
            if href:
                text += "(%s) " % href

        if node.tag in _HARDBREAKS and text and not text.endswith("\n"):
            text += "\n"

    return _encode_utf8(_rm_excessive_newlines(text))
|
||||||
|
|
||||||
|
|
||||||
|
def html_to_text(string):
    """
    Dead-simple HTML-to-text converter:
    >>> html_to_text("one<br>two<br>three")
    >>> "one\ntwo\nthree"

    NOTES:
    1. the string is expected to contain UTF-8 encoded HTML!
    2. returns utf-8 encoded str (not unicode)
    3. if html can't be parsed returns None
    """
    payload = string
    if isinstance(payload, six.text_type):
        payload = payload.encode('utf8')

    # Newlines carry no meaning in HTML source; strip them before parsing.
    declared = _prepend_utf8_declaration(payload)
    declared = declared.replace(b"\n", b"")

    tree = html_fromstring(declared)
    if tree is None:
        return None
    return html_tree_to_text(tree)
|
||||||
|
|
||||||
|
|
||||||
|
def html_fromstring(s):
    """Parse html tree from string. Return None if the string can't be parsed.
    """
    try:
        # Oversized documents are refused outright (processing-time guard).
        if html_too_big(s):
            return None
        return html5parser.fromstring(s, parser=_html5lib_parser())
    except Exception:
        return None
|
||||||
|
|
||||||
|
|
||||||
|
def html_document_fromstring(s):
    """Parse html tree from string. Return None if the string can't be parsed.
    """
    try:
        # Oversized documents are refused outright (processing-time guard).
        if html_too_big(s):
            return None
        return html5parser.document_fromstring(s, parser=_html5lib_parser())
    except Exception:
        return None
|
||||||
|
|
||||||
|
|
||||||
|
def cssselect(expr, tree):
    """Evaluate CSS selector *expr* against *tree*; return matching elements."""
    selector = CSSSelector(expr)
    return selector(tree)
|
||||||
|
|
||||||
|
|
||||||
|
def html_too_big(s):
    """True when the document has more tags than we are willing to process."""
    tag_opens = s.count('<')
    return tag_opens > _MAX_TAGS_COUNT
|
||||||
|
|
||||||
|
|
||||||
|
def _contains_charset_spec(s):
|
||||||
|
"""Return True if the first 4KB contain charset spec
|
||||||
|
"""
|
||||||
|
return s.lower().find(b'html; charset=', 0, 4096) != -1
|
||||||
|
|
||||||
|
|
||||||
|
def _prepend_utf8_declaration(s):
    """Prepend 'utf-8' encoding declaration if the first 4KB don't have any
    """
    if _contains_charset_spec(s):
        return s
    return _UTF8_DECLARATION + s
|
||||||
|
|
||||||
|
|
||||||
|
def _rm_excessive_newlines(s):
    """Collapse runs of blank lines (common with deeply nested divs) to one."""
    collapsed = _RE_EXCESSIVE_NEWLINES.sub("\n\n", s)
    return collapsed.strip()
|
||||||
|
|
||||||
|
|
||||||
|
def _encode_utf8(s):
    """Encode in 'utf-8' if unicode
    """
    if isinstance(s, six.text_type):
        return s.encode('utf-8')
    return s
|
||||||
|
|
||||||
|
|
||||||
|
def _html5lib_parser():
    """
    html5lib is a pure-python library that conforms to the WHATWG HTML spec
    and is not vulnarable to certain attacks common for XML libraries
    """
    # Build an lxml tree, but keep plain tag names ("div" rather than
    # "{http://www.w3.org/1999/xhtml}div") — the extraction logic relies
    # on un-namespaced tags.
    tree_builder = html5lib.treebuilders.getTreeBuilder("lxml")
    return html5lib.HTMLParser(tree_builder, namespaceHTMLElements=False)
|
||||||
|
|
||||||
|
|
||||||
|
_UTF8_DECLARATION = (b'<meta http-equiv="Content-Type" content="text/html;'
|
||||||
|
b'charset=utf-8">')
|
||||||
|
|
||||||
|
|
||||||
|
_BLOCKTAGS = ['div', 'p', 'ul', 'li', 'h1', 'h2', 'h3']
|
||||||
|
_HARDBREAKS = ['br', 'hr', 'tr']
|
||||||
|
|
||||||
|
_RE_EXCESSIVE_NEWLINES = re.compile("\n{2,10}")
|
||||||
|
|
||||||
|
# an extensive research shows that exceeding this limit
|
||||||
|
# might lead to excessive processing time
|
||||||
|
_MAX_TAGS_COUNT = 419
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
from __future__ import absolute_import
|
||||||
from nose.tools import *
|
from nose.tools import *
|
||||||
from mock import *
|
from mock import *
|
||||||
|
|
||||||
|
|||||||
87
tests/fixtures/html_replies/ms_outlook_2010.html
vendored
Normal file
87
tests/fixtures/html_replies/ms_outlook_2010.html
vendored
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<meta http-equiv="Content-Type" content="text/html; charset=iso-2022-jp">
|
||||||
|
<meta name="Generator" content="Microsoft Word 14 (filtered medium)">
|
||||||
|
<style><!--
|
||||||
|
/* Font Definitions */
|
||||||
|
@font-face
|
||||||
|
{font-family:Calibri;
|
||||||
|
panose-1:2 15 5 2 2 2 4 3 2 4;}
|
||||||
|
@font-face
|
||||||
|
{font-family:Tahoma;
|
||||||
|
panose-1:2 11 6 4 3 5 4 4 2 4;}
|
||||||
|
/* Style Definitions */
|
||||||
|
p.MsoNormal, li.MsoNormal, div.MsoNormal
|
||||||
|
{margin:0in;
|
||||||
|
margin-bottom:.0001pt;
|
||||||
|
font-size:12.0pt;
|
||||||
|
font-family:"Times New Roman","serif";}
|
||||||
|
h3
|
||||||
|
{mso-style-priority:9;
|
||||||
|
mso-style-link:"Heading 3 Char";
|
||||||
|
mso-margin-top-alt:auto;
|
||||||
|
margin-right:0in;
|
||||||
|
mso-margin-bottom-alt:auto;
|
||||||
|
margin-left:0in;
|
||||||
|
font-size:13.5pt;
|
||||||
|
font-family:"Times New Roman","serif";
|
||||||
|
font-weight:bold;}
|
||||||
|
a:link, span.MsoHyperlink
|
||||||
|
{mso-style-priority:99;
|
||||||
|
color:blue;
|
||||||
|
text-decoration:underline;}
|
||||||
|
a:visited, span.MsoHyperlinkFollowed
|
||||||
|
{mso-style-priority:99;
|
||||||
|
color:purple;
|
||||||
|
text-decoration:underline;}
|
||||||
|
p
|
||||||
|
{mso-style-priority:99;
|
||||||
|
mso-margin-top-alt:auto;
|
||||||
|
margin-right:0in;
|
||||||
|
mso-margin-bottom-alt:auto;
|
||||||
|
margin-left:0in;
|
||||||
|
font-size:12.0pt;
|
||||||
|
font-family:"Times New Roman","serif";}
|
||||||
|
span.Heading3Char
|
||||||
|
{mso-style-name:"Heading 3 Char";
|
||||||
|
mso-style-priority:9;
|
||||||
|
mso-style-link:"Heading 3";
|
||||||
|
font-family:"Cambria","serif";
|
||||||
|
color:#4F81BD;
|
||||||
|
font-weight:bold;}
|
||||||
|
span.EmailStyle19
|
||||||
|
{mso-style-type:personal-reply;
|
||||||
|
font-family:"Calibri","sans-serif";
|
||||||
|
color:#1F497D;}
|
||||||
|
.MsoChpDefault
|
||||||
|
{mso-style-type:export-only;
|
||||||
|
font-family:"Calibri","sans-serif";}
|
||||||
|
@page WordSection1
|
||||||
|
{size:8.5in 11.0in;
|
||||||
|
margin:1.0in 1.0in 1.0in 1.0in;}
|
||||||
|
div.WordSection1
|
||||||
|
{page:WordSection1;}
|
||||||
|
--></style><!--[if gte mso 9]><xml>
|
||||||
|
<o:shapedefaults v:ext="edit" spidmax="1026" />
|
||||||
|
</xml><![endif]--><!--[if gte mso 9]><xml>
|
||||||
|
<o:shapelayout v:ext="edit">
|
||||||
|
<o:idmap v:ext="edit" data="1" />
|
||||||
|
</o:shapelayout></xml><![endif]-->
|
||||||
|
</head>
|
||||||
|
<body lang="EN-US" link="blue" vlink="purple">
|
||||||
|
<div class="WordSection1">
|
||||||
|
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Hi. I am fine.<o:p></o:p></span></p>
|
||||||
|
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Thanks,<o:p></o:p></span></p>
|
||||||
|
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Alex<o:p></o:p></span></p>
|
||||||
|
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">From:</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> Foo [mailto:foo@bar.com]
|
||||||
|
<b>On Behalf Of </b>baz@bar.com<br>
|
||||||
|
<b>Sent:</b> Monday, January 01, 2000 12:00 AM<br>
|
||||||
|
<b>To:</b> john@bar.com<br>
|
||||||
|
<b>Cc:</b> jane@bar.io<br>
|
||||||
|
<b>Subject:</b> Conversation<o:p></o:p></span></p>
|
||||||
|
<p class="MsoNormal"><o:p> </o:p></p>
|
||||||
|
<p>Hello! How are you?<o:p></o:p></p>
|
||||||
|
<p class="MsoNormal"><o:p> </o:p></p>
|
||||||
|
</div>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
@@ -1,13 +1,12 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from . import *
|
from . import *
|
||||||
from . fixtures import *
|
from . fixtures import *
|
||||||
|
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
from talon import quotations
|
from talon import quotations, utils as u
|
||||||
|
|
||||||
import html2text
|
|
||||||
|
|
||||||
|
|
||||||
RE_WHITESPACE = re.compile("\s")
|
RE_WHITESPACE = re.compile("\s")
|
||||||
@@ -28,8 +27,8 @@ def test_quotation_splitter_inside_blockquote():
|
|||||||
|
|
||||||
</blockquote>"""
|
</blockquote>"""
|
||||||
|
|
||||||
eq_("<html><body><p>Reply\n</p></body></html>",
|
eq_("<html><head></head><body>Reply</body></html>",
|
||||||
quotations.extract_from_html(msg_body))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
def test_quotation_splitter_outside_blockquote():
|
def test_quotation_splitter_outside_blockquote():
|
||||||
@@ -45,7 +44,7 @@ def test_quotation_splitter_outside_blockquote():
|
|||||||
</div>
|
</div>
|
||||||
</blockquote>
|
</blockquote>
|
||||||
"""
|
"""
|
||||||
eq_("<html><body><p>Reply</p><div></div></body></html>",
|
eq_("<html><head></head><body>Reply</body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -63,7 +62,7 @@ def test_regular_blockquote():
|
|||||||
</div>
|
</div>
|
||||||
</blockquote>
|
</blockquote>
|
||||||
"""
|
"""
|
||||||
eq_("<html><body><p>Reply</p><blockquote>Regular</blockquote><div></div></body></html>",
|
eq_("<html><head></head><body>Reply<blockquote>Regular</blockquote></body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -86,6 +85,7 @@ Reply
|
|||||||
|
|
||||||
reply = """
|
reply = """
|
||||||
<html>
|
<html>
|
||||||
|
<head></head>
|
||||||
<body>
|
<body>
|
||||||
Reply
|
Reply
|
||||||
|
|
||||||
@@ -129,7 +129,30 @@ def test_gmail_quote():
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>"""
|
</div>"""
|
||||||
eq_("<html><body><p>Reply</p></body></html>",
|
eq_("<html><head></head><body>Reply</body></html>",
|
||||||
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
|
def test_gmail_quote_compact():
|
||||||
|
msg_body = 'Reply' \
|
||||||
|
'<div class="gmail_quote">' \
|
||||||
|
'<div class="gmail_quote">On 11-Apr-2011, at 6:54 PM, Bob <bob@example.com> wrote:' \
|
||||||
|
'<div>Test</div>' \
|
||||||
|
'</div>' \
|
||||||
|
'</div>'
|
||||||
|
eq_("<html><head></head><body>Reply</body></html>",
|
||||||
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
|
def test_gmail_quote_blockquote():
|
||||||
|
msg_body = """Message
|
||||||
|
<blockquote class="gmail_quote">
|
||||||
|
<div class="gmail_default">
|
||||||
|
My name is William Shakespeare.
|
||||||
|
<br/>
|
||||||
|
</div>
|
||||||
|
</blockquote>"""
|
||||||
|
eq_(RE_WHITESPACE.sub('', msg_body),
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -140,11 +163,11 @@ def test_unicode_in_reply():
|
|||||||
<br>
|
<br>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<blockquote class="gmail_quote">
|
<blockquote>
|
||||||
Quote
|
Quote
|
||||||
</blockquote>""".encode("utf-8")
|
</blockquote>""".encode("utf-8")
|
||||||
|
|
||||||
eq_("<html><body><p>Reply  Text<br></p><div><br></div>"
|
eq_("<html><head></head><body>Reply  Text<br><div><br></div>"
|
||||||
"</body></html>",
|
"</body></html>",
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
@@ -170,6 +193,7 @@ def test_blockquote_disclaimer():
|
|||||||
|
|
||||||
stripped_html = """
|
stripped_html = """
|
||||||
<html>
|
<html>
|
||||||
|
<head></head>
|
||||||
<body>
|
<body>
|
||||||
<div>
|
<div>
|
||||||
<div>
|
<div>
|
||||||
@@ -201,7 +225,7 @@ def test_date_block():
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
"""
|
"""
|
||||||
eq_('<html><body><div>message<br></div></body></html>',
|
eq_('<html><head></head><body><div>message<br></div></body></html>',
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -218,7 +242,7 @@ Subject: You Have New Mail From Mary!<br><br>
|
|||||||
text
|
text
|
||||||
</div></div>
|
</div></div>
|
||||||
"""
|
"""
|
||||||
eq_('<html><body><div>message<br></div></body></html>',
|
eq_('<html><head></head><body><div>message<br></div></body></html>',
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -236,7 +260,7 @@ def test_reply_shares_div_with_from_block():
|
|||||||
|
|
||||||
</div>
|
</div>
|
||||||
</body>'''
|
</body>'''
|
||||||
eq_('<html><body><div>Blah<br><br></div></body></html>',
|
eq_('<html><head></head><body><div>Blah<br><br></div></body></html>',
|
||||||
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
@@ -247,18 +271,35 @@ def test_reply_quotations_share_block():
|
|||||||
|
|
||||||
|
|
||||||
def test_OLK_SRC_BODY_SECTION_stripped():
|
def test_OLK_SRC_BODY_SECTION_stripped():
|
||||||
eq_('<html><body><div>Reply</div></body></html>',
|
eq_('<html><head></head><body><div>Reply</div></body></html>',
|
||||||
RE_WHITESPACE.sub(
|
RE_WHITESPACE.sub(
|
||||||
'', quotations.extract_from_html(OLK_SRC_BODY_SECTION)))
|
'', quotations.extract_from_html(OLK_SRC_BODY_SECTION)))
|
||||||
|
|
||||||
|
|
||||||
def test_reply_separated_by_hr():
|
def test_reply_separated_by_hr():
|
||||||
eq_('<html><body><div>Hi<div>there</div></div></body></html>',
|
eq_('<html><head></head><body><div>Hi<div>there</div></div></body></html>',
|
||||||
RE_WHITESPACE.sub(
|
RE_WHITESPACE.sub(
|
||||||
'', quotations.extract_from_html(REPLY_SEPARATED_BY_HR)))
|
'', quotations.extract_from_html(REPLY_SEPARATED_BY_HR)))
|
||||||
|
|
||||||
|
|
||||||
RE_REPLY = re.compile(r"^Hi\. I am fine\.\s*\n\s*Thanks,\s*\n\s*Alex\s*$")
|
def test_from_block_and_quotations_in_separate_divs():
|
||||||
|
msg_body = '''
|
||||||
|
Reply
|
||||||
|
<div>
|
||||||
|
<hr/>
|
||||||
|
<div>
|
||||||
|
<font>
|
||||||
|
<b>From: bob@example.com</b>
|
||||||
|
<b>Date: Thu, 24 Mar 2016 08:07:12 -0700</b>
|
||||||
|
</font>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
Quoted message
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
'''
|
||||||
|
eq_('<html><head></head><body>Reply<div><hr></div></body></html>',
|
||||||
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
def extract_reply_and_check(filename):
|
def extract_reply_and_check(filename):
|
||||||
@@ -266,18 +307,11 @@ def extract_reply_and_check(filename):
|
|||||||
|
|
||||||
msg_body = f.read()
|
msg_body = f.read()
|
||||||
reply = quotations.extract_from_html(msg_body)
|
reply = quotations.extract_from_html(msg_body)
|
||||||
|
plain_reply = u.html_to_text(reply)
|
||||||
|
plain_reply = plain_reply.decode('utf8')
|
||||||
|
|
||||||
h = html2text.HTML2Text()
|
eq_(RE_WHITESPACE.sub('', "Hi. I am fine.\n\nThanks,\nAlex"),
|
||||||
h.body_width = 0
|
RE_WHITESPACE.sub('', plain_reply))
|
||||||
plain_reply = h.handle(reply)
|
|
||||||
|
|
||||||
#remove spaces
|
|
||||||
plain_reply = plain_reply.replace(u'\xa0', u' ')
|
|
||||||
|
|
||||||
if RE_REPLY.match(plain_reply):
|
|
||||||
eq_(1, 1)
|
|
||||||
else:
|
|
||||||
eq_("Hi. I am fine.\n\nThanks,\nAlex", plain_reply)
|
|
||||||
|
|
||||||
|
|
||||||
def test_gmail_reply():
|
def test_gmail_reply():
|
||||||
@@ -300,6 +334,10 @@ def test_ms_outlook_2007_reply():
|
|||||||
extract_reply_and_check("tests/fixtures/html_replies/ms_outlook_2007.html")
|
extract_reply_and_check("tests/fixtures/html_replies/ms_outlook_2007.html")
|
||||||
|
|
||||||
|
|
||||||
|
def test_ms_outlook_2010_reply():
|
||||||
|
extract_reply_and_check("tests/fixtures/html_replies/ms_outlook_2010.html")
|
||||||
|
|
||||||
|
|
||||||
def test_thunderbird_reply():
|
def test_thunderbird_reply():
|
||||||
extract_reply_and_check("tests/fixtures/html_replies/thunderbird.html")
|
extract_reply_and_check("tests/fixtures/html_replies/thunderbird.html")
|
||||||
|
|
||||||
@@ -315,9 +353,13 @@ def test_yandex_ru_reply():
|
|||||||
def test_CRLF():
|
def test_CRLF():
|
||||||
"""CR is not converted to ' '
|
"""CR is not converted to ' '
|
||||||
"""
|
"""
|
||||||
eq_('<html>\r\n</html>', quotations.extract_from_html('<html>\r\n</html>'))
|
symbol = ' '
|
||||||
|
extracted = quotations.extract_from_html('<html>\r\n</html>')
|
||||||
|
assert_false(symbol in extracted)
|
||||||
|
eq_('<html></html>', RE_WHITESPACE.sub('', extracted))
|
||||||
|
|
||||||
msg_body = """Reply
|
msg_body = """My
|
||||||
|
reply
|
||||||
<blockquote>
|
<blockquote>
|
||||||
|
|
||||||
<div>
|
<div>
|
||||||
@@ -330,5 +372,50 @@ def test_CRLF():
|
|||||||
|
|
||||||
</blockquote>"""
|
</blockquote>"""
|
||||||
msg_body = msg_body.replace('\n', '\r\n')
|
msg_body = msg_body.replace('\n', '\r\n')
|
||||||
eq_("<html><body><p>Reply\r\n</p></body></html>",
|
extracted = quotations.extract_from_html(msg_body)
|
||||||
quotations.extract_from_html(msg_body))
|
assert_false(symbol in extracted)
|
||||||
|
# Keep new lines otherwise "My reply" becomes one word - "Myreply"
|
||||||
|
eq_("<html><head></head><body>My\nreply\n</body></html>", extracted)
|
||||||
|
|
||||||
|
|
||||||
|
def test_gmail_forwarded_msg():
|
||||||
|
msg_body = """<div dir="ltr"><br><div class="gmail_quote">---------- Forwarded message ----------<br>From: <b class="gmail_sendername">Bob</b> <span dir="ltr"><<a href="mailto:bob@example.com">bob@example.com</a>></span><br>Date: Fri, Feb 11, 2010 at 5:59 PM<br>Subject: Bob WFH today<br>To: Mary <<a href="mailto:mary@example.com">mary@example.com</a>><br><br><br><div dir="ltr">eom</div>
|
||||||
|
</div><br></div>"""
|
||||||
|
extracted = quotations.extract_from_html(msg_body)
|
||||||
|
eq_(RE_WHITESPACE.sub('', msg_body), RE_WHITESPACE.sub('', extracted))
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(u, '_MAX_TAGS_COUNT', 4)
|
||||||
|
def test_too_large_html():
|
||||||
|
msg_body = 'Reply' \
|
||||||
|
'<div class="gmail_quote">' \
|
||||||
|
'<div class="gmail_quote">On 11-Apr-2011, at 6:54 PM, Bob <bob@example.com> wrote:' \
|
||||||
|
'<div>Test</div>' \
|
||||||
|
'</div>' \
|
||||||
|
'</div>'
|
||||||
|
eq_(RE_WHITESPACE.sub('', msg_body),
|
||||||
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
|
def test_readable_html_empty():
|
||||||
|
msg_body = """
|
||||||
|
<blockquote>
|
||||||
|
Reply
|
||||||
|
<div>
|
||||||
|
On 11-Apr-2011, at 6:54 PM, Bob <bob@example.com> wrote:
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
Test
|
||||||
|
</div>
|
||||||
|
|
||||||
|
</blockquote>"""
|
||||||
|
|
||||||
|
eq_(RE_WHITESPACE.sub('', msg_body),
|
||||||
|
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(quotations, 'html_document_fromstring', Mock(return_value=None))
|
||||||
|
def test_bad_html():
|
||||||
|
bad_html = "<html></html>"
|
||||||
|
eq_(bad_html, quotations.extract_from_html(bad_html))
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from . import *
|
from . import *
|
||||||
from . fixtures import *
|
from . fixtures import *
|
||||||
|
|
||||||
@@ -29,15 +30,3 @@ def test_crash_inside_extract_from():
|
|||||||
|
|
||||||
def test_empty_body():
|
def test_empty_body():
|
||||||
eq_('', quotations.extract_from_plain(''))
|
eq_('', quotations.extract_from_plain(''))
|
||||||
|
|
||||||
|
|
||||||
def test__CRLF_to_LF():
|
|
||||||
eq_(('\n\r', True), quotations._CRLF_to_LF('\r\n\r'))
|
|
||||||
eq_(('\n\r', False), quotations._CRLF_to_LF('\n\r'))
|
|
||||||
|
|
||||||
|
|
||||||
def test__restore_CRLF():
|
|
||||||
eq_('\n', quotations._restore_CRLF('\n', replaced=False))
|
|
||||||
eq_('\r\n', quotations._restore_CRLF('\n', replaced=True))
|
|
||||||
# default
|
|
||||||
eq_('\r\n', quotations._restore_CRLF('\n'))
|
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from .. import *
|
from .. import *
|
||||||
|
|
||||||
from talon.signature import bruteforce
|
from talon.signature import bruteforce
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from .. import *
|
from .. import *
|
||||||
|
|
||||||
import os
|
import os
|
||||||
@@ -8,6 +9,7 @@ from talon.signature.learning import dataset
|
|||||||
from talon import signature
|
from talon import signature
|
||||||
from talon.signature import extraction as e
|
from talon.signature import extraction as e
|
||||||
from talon.signature import bruteforce
|
from talon.signature import bruteforce
|
||||||
|
from six.moves import range
|
||||||
|
|
||||||
|
|
||||||
def test_message_shorter_SIGNATURE_MAX_LINES():
|
def test_message_shorter_SIGNATURE_MAX_LINES():
|
||||||
@@ -75,6 +77,31 @@ def test_basic():
|
|||||||
signature.extract(msg_body, 'Sergey'))
|
signature.extract(msg_body, 'Sergey'))
|
||||||
|
|
||||||
|
|
||||||
|
def test_capitalized():
|
||||||
|
msg_body = """Hi Mary,
|
||||||
|
|
||||||
|
Do you still need a DJ for your wedding? I've included a video demo of one of our DJs available for your wedding date.
|
||||||
|
|
||||||
|
DJ Doe
|
||||||
|
http://example.com
|
||||||
|
Password: SUPERPASSWORD
|
||||||
|
|
||||||
|
Would you like to check out more?
|
||||||
|
|
||||||
|
|
||||||
|
At your service,
|
||||||
|
|
||||||
|
John Smith
|
||||||
|
Doe Inc
|
||||||
|
555-531-7967"""
|
||||||
|
|
||||||
|
sig = """John Smith
|
||||||
|
Doe Inc
|
||||||
|
555-531-7967"""
|
||||||
|
|
||||||
|
eq_(sig, signature.extract(msg_body, 'Doe')[1])
|
||||||
|
|
||||||
|
|
||||||
def test_over_2_text_lines_after_signature():
|
def test_over_2_text_lines_after_signature():
|
||||||
body = """Blah
|
body = """Blah
|
||||||
|
|
||||||
@@ -127,20 +154,20 @@ def test_mark_lines():
|
|||||||
|
|
||||||
def test_process_marked_lines():
|
def test_process_marked_lines():
|
||||||
# no signature found
|
# no signature found
|
||||||
eq_((range(5), None), e._process_marked_lines(range(5), 'telt'))
|
eq_((list(range(5)), None), e._process_marked_lines(list(range(5)), 'telt'))
|
||||||
|
|
||||||
# signature in the middle of the text
|
# signature in the middle of the text
|
||||||
eq_((range(9), None), e._process_marked_lines(range(9), 'tesestelt'))
|
eq_((list(range(9)), None), e._process_marked_lines(list(range(9)), 'tesestelt'))
|
||||||
|
|
||||||
# long line splits signature
|
# long line splits signature
|
||||||
eq_((range(7), [7, 8]),
|
eq_((list(range(7)), [7, 8]),
|
||||||
e._process_marked_lines(range(9), 'tsslsless'))
|
e._process_marked_lines(list(range(9)), 'tsslsless'))
|
||||||
|
|
||||||
eq_((range(20), [20]),
|
eq_((list(range(20)), [20]),
|
||||||
e._process_marked_lines(range(21), 'ttttttstttesllelelets'))
|
e._process_marked_lines(list(range(21)), 'ttttttstttesllelelets'))
|
||||||
|
|
||||||
# some signature lines could be identified as text
|
# some signature lines could be identified as text
|
||||||
eq_(([0], range(1, 9)), e._process_marked_lines(range(9), 'tsetetest'))
|
eq_(([0], list(range(1, 9))), e._process_marked_lines(list(range(9)), 'tsetetest'))
|
||||||
|
|
||||||
eq_(([], range(5)),
|
eq_(([], list(range(5))),
|
||||||
e._process_marked_lines(range(5), "ststt"))
|
e._process_marked_lines(list(range(5)), "ststt"))
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from ... import *
|
from ... import *
|
||||||
import os
|
import os
|
||||||
|
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from ... import *
|
from ... import *
|
||||||
|
|
||||||
from talon.signature.learning import featurespace as fs
|
from talon.signature.learning import featurespace as fs
|
||||||
|
|||||||
@@ -1,11 +1,13 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from ... import *
|
from ... import *
|
||||||
|
|
||||||
import regex as re
|
import regex as re
|
||||||
|
|
||||||
from talon.signature.learning import helpers as h
|
from talon.signature.learning import helpers as h
|
||||||
from talon.signature.learning.helpers import *
|
from talon.signature.learning.helpers import *
|
||||||
|
from six.moves import range
|
||||||
|
|
||||||
# First testing regex constants.
|
# First testing regex constants.
|
||||||
VALID = '''
|
VALID = '''
|
||||||
@@ -154,7 +156,7 @@ def test_extract_names():
|
|||||||
# check that extracted names could be compiled
|
# check that extracted names could be compiled
|
||||||
try:
|
try:
|
||||||
re.compile("|".join(extracted_names))
|
re.compile("|".join(extracted_names))
|
||||||
except Exception, e:
|
except Exception as e:
|
||||||
ok_(False, ("Failed to compile extracted names {}"
|
ok_(False, ("Failed to compile extracted names {}"
|
||||||
"\n\nReason: {}").format(extracted_names, e))
|
"\n\nReason: {}").format(extracted_names, e))
|
||||||
if expected_names:
|
if expected_names:
|
||||||
@@ -190,10 +192,11 @@ def test_punctuation_percent(categories_percent):
|
|||||||
def test_capitalized_words_percent():
|
def test_capitalized_words_percent():
|
||||||
eq_(0.0, h.capitalized_words_percent(''))
|
eq_(0.0, h.capitalized_words_percent(''))
|
||||||
eq_(100.0, h.capitalized_words_percent('Example Corp'))
|
eq_(100.0, h.capitalized_words_percent('Example Corp'))
|
||||||
eq_(50.0, h.capitalized_words_percent('Qqq qqq QQQ 123 sss'))
|
eq_(50.0, h.capitalized_words_percent('Qqq qqq Aqs 123 sss'))
|
||||||
eq_(100.0, h.capitalized_words_percent('Cell 713-444-7368'))
|
eq_(100.0, h.capitalized_words_percent('Cell 713-444-7368'))
|
||||||
eq_(100.0, h.capitalized_words_percent('8th Floor'))
|
eq_(100.0, h.capitalized_words_percent('8th Floor'))
|
||||||
eq_(0.0, h.capitalized_words_percent('(212) 230-9276'))
|
eq_(0.0, h.capitalized_words_percent('(212) 230-9276'))
|
||||||
|
eq_(50.0, h.capitalized_words_percent('Password: REMARKABLE'))
|
||||||
|
|
||||||
|
|
||||||
def test_has_signature():
|
def test_has_signature():
|
||||||
@@ -204,7 +207,7 @@ def test_has_signature():
|
|||||||
'sender@example.com'))
|
'sender@example.com'))
|
||||||
assert_false(h.has_signature('http://www.example.com/555-555-5555',
|
assert_false(h.has_signature('http://www.example.com/555-555-5555',
|
||||||
'sender@example.com'))
|
'sender@example.com'))
|
||||||
long_line = ''.join(['q' for e in xrange(28)])
|
long_line = ''.join(['q' for e in range(28)])
|
||||||
assert_false(h.has_signature(long_line + ' sender', 'sender@example.com'))
|
assert_false(h.has_signature(long_line + ' sender', 'sender@example.com'))
|
||||||
# wont crash on an empty string
|
# wont crash on an empty string
|
||||||
assert_false(h.has_signature('', ''))
|
assert_false(h.has_signature('', ''))
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
# -*- coding: utf-8 -*-
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from . import *
|
from . import *
|
||||||
from . fixtures import *
|
from . fixtures import *
|
||||||
|
|
||||||
@@ -7,6 +8,9 @@ import os
|
|||||||
|
|
||||||
import email.iterators
|
import email.iterators
|
||||||
from talon import quotations
|
from talon import quotations
|
||||||
|
import six
|
||||||
|
from six.moves import range
|
||||||
|
from six import StringIO
|
||||||
|
|
||||||
|
|
||||||
@patch.object(quotations, 'MAX_LINES_COUNT', 1)
|
@patch.object(quotations, 'MAX_LINES_COUNT', 1)
|
||||||
@@ -32,6 +36,19 @@ On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> wrote:
|
|||||||
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
|
def test_pattern_sent_from_samsung_smb_wrote():
|
||||||
|
msg_body = """Test reply
|
||||||
|
|
||||||
|
Sent from Samsung MobileName <address@example.com> wrote:
|
||||||
|
|
||||||
|
>
|
||||||
|
> Test
|
||||||
|
>
|
||||||
|
> Roman"""
|
||||||
|
|
||||||
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
def test_pattern_on_date_wrote_somebody():
|
def test_pattern_on_date_wrote_somebody():
|
||||||
eq_('Lorem', quotations.extract_from_plain(
|
eq_('Lorem', quotations.extract_from_plain(
|
||||||
"""Lorem
|
"""Lorem
|
||||||
@@ -54,6 +71,18 @@ On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
|
|||||||
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
|
def test_date_time_email_splitter():
|
||||||
|
msg_body = """Test reply
|
||||||
|
|
||||||
|
2014-10-17 11:28 GMT+03:00 Postmaster <
|
||||||
|
postmaster@sandboxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.mailgun.org>:
|
||||||
|
|
||||||
|
> First from site
|
||||||
|
>
|
||||||
|
"""
|
||||||
|
eq_("Test reply", quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
def test_pattern_on_date_somebody_wrote_allows_space_in_front():
|
def test_pattern_on_date_somebody_wrote_allows_space_in_front():
|
||||||
msg_body = """Thanks Thanmai
|
msg_body = """Thanks Thanmai
|
||||||
On Mar 8, 2012 9:59 AM, "Example.com" <
|
On Mar 8, 2012 9:59 AM, "Example.com" <
|
||||||
@@ -113,7 +142,7 @@ def _check_pattern_original_message(original_message_indicator):
|
|||||||
-----{}-----
|
-----{}-----
|
||||||
|
|
||||||
Test"""
|
Test"""
|
||||||
eq_('Test reply', quotations.extract_from_plain(msg_body.format(unicode(original_message_indicator))))
|
eq_('Test reply', quotations.extract_from_plain(msg_body.format(six.text_type(original_message_indicator))))
|
||||||
|
|
||||||
def test_english_original_message():
|
def test_english_original_message():
|
||||||
_check_pattern_original_message('Original Message')
|
_check_pattern_original_message('Original Message')
|
||||||
@@ -637,6 +666,15 @@ def test_preprocess_postprocess_2_links():
|
|||||||
eq_(msg_body, quotations.extract_from_plain(msg_body))
|
eq_(msg_body, quotations.extract_from_plain(msg_body))
|
||||||
|
|
||||||
|
|
||||||
|
def body_iterator(msg, decode=False):
|
||||||
|
for subpart in msg.walk():
|
||||||
|
payload = subpart.get_payload(decode=decode)
|
||||||
|
if isinstance(payload, six.text_type):
|
||||||
|
yield payload
|
||||||
|
else:
|
||||||
|
yield payload.decode('utf8')
|
||||||
|
|
||||||
|
|
||||||
def test_standard_replies():
|
def test_standard_replies():
|
||||||
for filename in os.listdir(STANDARD_REPLIES):
|
for filename in os.listdir(STANDARD_REPLIES):
|
||||||
filename = os.path.join(STANDARD_REPLIES, filename)
|
filename = os.path.join(STANDARD_REPLIES, filename)
|
||||||
@@ -644,8 +682,8 @@ def test_standard_replies():
|
|||||||
continue
|
continue
|
||||||
with open(filename) as f:
|
with open(filename) as f:
|
||||||
message = email.message_from_file(f)
|
message = email.message_from_file(f)
|
||||||
body = email.iterators.typed_subpart_iterator(message, subtype='plain').next()
|
body = next(email.iterators.typed_subpart_iterator(message, subtype='plain'))
|
||||||
text = ''.join(email.iterators.body_line_iterator(body, True))
|
text = ''.join(body_iterator(body, True))
|
||||||
|
|
||||||
stripped_text = quotations.extract_from_plain(text)
|
stripped_text = quotations.extract_from_plain(text)
|
||||||
reply_text_fn = filename[:-4] + '_reply_text'
|
reply_text_fn = filename[:-4] + '_reply_text'
|
||||||
@@ -658,3 +696,52 @@ def test_standard_replies():
|
|||||||
"'%(reply)s' != %(stripped)s for %(fn)s" % \
|
"'%(reply)s' != %(stripped)s for %(fn)s" % \
|
||||||
{'reply': reply_text, 'stripped': stripped_text,
|
{'reply': reply_text, 'stripped': stripped_text,
|
||||||
'fn': filename}
|
'fn': filename}
|
||||||
|
|
||||||
|
|
||||||
|
def test_split_email():
|
||||||
|
msg = """From: Mr. X
|
||||||
|
Date: 24 February 2016
|
||||||
|
To: Mr. Y
|
||||||
|
Subject: Hi
|
||||||
|
Attachments: none
|
||||||
|
Goodbye.
|
||||||
|
From: Mr. Y
|
||||||
|
To: Mr. X
|
||||||
|
Date: 24 February 2016
|
||||||
|
Subject: Hi
|
||||||
|
Attachments: none
|
||||||
|
|
||||||
|
Hello.
|
||||||
|
|
||||||
|
On 24th February 2016 at 09.32am, Conal wrote:
|
||||||
|
|
||||||
|
Hey!
|
||||||
|
|
||||||
|
On Mon, 2016-10-03 at 09:45 -0600, Stangel, Dan wrote:
|
||||||
|
> Mohan,
|
||||||
|
>
|
||||||
|
> We have not yet migrated the systems.
|
||||||
|
>
|
||||||
|
> Dan
|
||||||
|
>
|
||||||
|
> > -----Original Message-----
|
||||||
|
> > Date: Mon, 2 Apr 2012 17:44:22 +0400
|
||||||
|
> > Subject: Test
|
||||||
|
> > From: bob@xxx.mailgun.org
|
||||||
|
> > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com
|
||||||
|
> >
|
||||||
|
> > Hi
|
||||||
|
> >
|
||||||
|
> > > From: bob@xxx.mailgun.org
|
||||||
|
> > > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com
|
||||||
|
> > > Date: Mon, 2 Apr 2012 17:44:22 +0400
|
||||||
|
> > > Subject: Test
|
||||||
|
> > > Hi
|
||||||
|
> > >
|
||||||
|
> >
|
||||||
|
>
|
||||||
|
>
|
||||||
|
"""
|
||||||
|
expected_markers = "stttttsttttetesetesmmmmmmssmmmmmmsmmmmmmmm"
|
||||||
|
markers = quotations.split_emails(msg)
|
||||||
|
eq_(markers, expected_markers)
|
||||||
|
|||||||
@@ -1,9 +1,12 @@
|
|||||||
# coding:utf-8
|
# coding:utf-8
|
||||||
|
|
||||||
|
from __future__ import absolute_import
|
||||||
from . import *
|
from . import *
|
||||||
|
|
||||||
from talon import utils as u
|
from talon import utils as u
|
||||||
import cchardet
|
import cchardet
|
||||||
|
import six
|
||||||
|
from lxml import html
|
||||||
|
|
||||||
|
|
||||||
def test_get_delimiter():
|
def test_get_delimiter():
|
||||||
@@ -14,47 +17,142 @@ def test_get_delimiter():
|
|||||||
|
|
||||||
def test_unicode():
|
def test_unicode():
|
||||||
eq_ (u'hi', u.to_unicode('hi'))
|
eq_ (u'hi', u.to_unicode('hi'))
|
||||||
eq_ (type(u.to_unicode('hi')), unicode )
|
eq_ (type(u.to_unicode('hi')), six.text_type )
|
||||||
eq_ (type(u.to_unicode(u'hi')), unicode )
|
eq_ (type(u.to_unicode(u'hi')), six.text_type )
|
||||||
eq_ (type(u.to_unicode('привет')), unicode )
|
eq_ (type(u.to_unicode('привет')), six.text_type )
|
||||||
eq_ (type(u.to_unicode(u'привет')), unicode )
|
eq_ (type(u.to_unicode(u'привет')), six.text_type )
|
||||||
eq_ (u"привет", u.to_unicode('привет'))
|
eq_ (u"привет", u.to_unicode('привет'))
|
||||||
eq_ (u"привет", u.to_unicode(u'привет'))
|
eq_ (u"привет", u.to_unicode(u'привет'))
|
||||||
# some latin1 stuff
|
# some latin1 stuff
|
||||||
eq_ (u"Versión", u.to_unicode('Versi\xf3n', precise=True))
|
eq_ (u"Versión", u.to_unicode(u'Versi\xf3n'.encode('iso-8859-2'), precise=True))
|
||||||
|
|
||||||
|
|
||||||
def test_detect_encoding():
|
def test_detect_encoding():
|
||||||
eq_ ('ascii', u.detect_encoding('qwe').lower())
|
eq_ ('ascii', u.detect_encoding(b'qwe').lower())
|
||||||
eq_ ('iso-8859-2', u.detect_encoding('Versi\xf3n').lower())
|
eq_ ('iso-8859-2', u.detect_encoding(u'Versi\xf3n'.encode('iso-8859-2')).lower())
|
||||||
eq_ ('utf-8', u.detect_encoding('привет').lower())
|
eq_ ('utf-8', u.detect_encoding(u'привет'.encode('utf8')).lower())
|
||||||
# fallback to utf-8
|
# fallback to utf-8
|
||||||
with patch.object(u.chardet, 'detect') as detect:
|
with patch.object(u.chardet, 'detect') as detect:
|
||||||
detect.side_effect = Exception
|
detect.side_effect = Exception
|
||||||
eq_ ('utf-8', u.detect_encoding('qwe').lower())
|
eq_ ('utf-8', u.detect_encoding('qwe'.encode('utf8')).lower())
|
||||||
|
|
||||||
|
|
||||||
def test_quick_detect_encoding():
|
def test_quick_detect_encoding():
|
||||||
eq_ ('ascii', u.quick_detect_encoding('qwe').lower())
|
eq_ ('ascii', u.quick_detect_encoding(b'qwe').lower())
|
||||||
eq_ ('windows-1252', u.quick_detect_encoding('Versi\xf3n').lower())
|
eq_ ('windows-1252', u.quick_detect_encoding(u'Versi\xf3n'.encode('windows-1252')).lower())
|
||||||
eq_ ('utf-8', u.quick_detect_encoding('привет').lower())
|
eq_ ('utf-8', u.quick_detect_encoding(u'привет'.encode('utf8')).lower())
|
||||||
|
|
||||||
|
|
||||||
@patch.object(cchardet, 'detect')
|
@patch.object(cchardet, 'detect')
|
||||||
@patch.object(u, 'detect_encoding')
|
@patch.object(u, 'detect_encoding')
|
||||||
def test_quick_detect_encoding_edge_cases(detect_encoding, cchardet_detect):
|
def test_quick_detect_encoding_edge_cases(detect_encoding, cchardet_detect):
|
||||||
cchardet_detect.return_value = {'encoding': 'ascii'}
|
cchardet_detect.return_value = {'encoding': 'ascii'}
|
||||||
eq_('ascii', u.quick_detect_encoding("qwe"))
|
eq_('ascii', u.quick_detect_encoding(b"qwe"))
|
||||||
cchardet_detect.assert_called_once_with("qwe")
|
cchardet_detect.assert_called_once_with(b"qwe")
|
||||||
|
|
||||||
# fallback to detect_encoding
|
# fallback to detect_encoding
|
||||||
cchardet_detect.return_value = {}
|
cchardet_detect.return_value = {}
|
||||||
detect_encoding.return_value = 'utf-8'
|
detect_encoding.return_value = 'utf-8'
|
||||||
eq_('utf-8', u.quick_detect_encoding("qwe"))
|
eq_('utf-8', u.quick_detect_encoding(b"qwe"))
|
||||||
|
|
||||||
# exception
|
# exception
|
||||||
detect_encoding.reset_mock()
|
detect_encoding.reset_mock()
|
||||||
cchardet_detect.side_effect = Exception()
|
cchardet_detect.side_effect = Exception()
|
||||||
detect_encoding.return_value = 'utf-8'
|
detect_encoding.return_value = 'utf-8'
|
||||||
eq_('utf-8', u.quick_detect_encoding("qwe"))
|
eq_('utf-8', u.quick_detect_encoding(b"qwe"))
|
||||||
ok_(detect_encoding.called)
|
ok_(detect_encoding.called)
|
||||||
|
|
||||||
|
|
||||||
|
def test_html_to_text():
|
||||||
|
html = """<body>
|
||||||
|
<p>Hello world!</p>
|
||||||
|
<br>
|
||||||
|
<ul>
|
||||||
|
<li>One!</li>
|
||||||
|
<li>Two</li>
|
||||||
|
</ul>
|
||||||
|
<p>
|
||||||
|
Haha
|
||||||
|
</p>
|
||||||
|
</body>"""
|
||||||
|
text = u.html_to_text(html)
|
||||||
|
eq_(b"Hello world! \n\n * One! \n * Two \nHaha", text)
|
||||||
|
eq_(u"привет!", u.html_to_text("<b>привет!</b>").decode('utf8'))
|
||||||
|
|
||||||
|
html = '<body><br/><br/>Hi</body>'
|
||||||
|
eq_ (b'Hi', u.html_to_text(html))
|
||||||
|
|
||||||
|
html = """Hi
|
||||||
|
<style type="text/css">
|
||||||
|
|
||||||
|
div, p, li {
|
||||||
|
|
||||||
|
font: 13px 'Lucida Grande', Arial, sans-serif;
|
||||||
|
|
||||||
|
}
|
||||||
|
</style>
|
||||||
|
|
||||||
|
<style type="text/css">
|
||||||
|
|
||||||
|
h1 {
|
||||||
|
|
||||||
|
font: 13px 'Lucida Grande', Arial, sans-serif;
|
||||||
|
|
||||||
|
}
|
||||||
|
</style>"""
|
||||||
|
eq_ (b'Hi', u.html_to_text(html))
|
||||||
|
|
||||||
|
html = """<div>
|
||||||
|
<!-- COMMENT 1 -->
|
||||||
|
<span>TEXT 1</span>
|
||||||
|
<p>TEXT 2 <!-- COMMENT 2 --></p>
|
||||||
|
</div>"""
|
||||||
|
eq_(b'TEXT 1 \nTEXT 2', u.html_to_text(html))
|
||||||
|
|
||||||
|
|
||||||
|
def test_comment_no_parent():
|
||||||
|
s = "<!-- COMMENT 1 --> no comment"
|
||||||
|
d = u.html_document_fromstring(s)
|
||||||
|
eq_("no comment", u.html_tree_to_text(d))
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(u.html5parser, 'fromstring', Mock(side_effect=Exception()))
|
||||||
|
def test_html_fromstring_exception():
|
||||||
|
eq_(None, u.html_fromstring("<html></html>"))
|
||||||
|
|
||||||
|
@patch.object(u, 'html_too_big', Mock())
|
||||||
|
@patch.object(u.html5parser, 'fromstring')
|
||||||
|
def test_html_fromstring_too_big(fromstring):
|
||||||
|
eq_(None, u.html_fromstring("<html></html>"))
|
||||||
|
assert_false(fromstring.called)
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(u.html5parser, 'document_fromstring')
|
||||||
|
def test_html_document_fromstring_exception(document_fromstring):
|
||||||
|
document_fromstring.side_effect = Exception()
|
||||||
|
eq_(None, u.html_document_fromstring("<html></html>"))
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(u, 'html_too_big', Mock())
|
||||||
|
@patch.object(u.html5parser, 'document_fromstring')
|
||||||
|
def test_html_document_fromstring_too_big(document_fromstring):
|
||||||
|
eq_(None, u.html_document_fromstring("<html></html>"))
|
||||||
|
assert_false(document_fromstring.called)
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(u, 'html_fromstring', Mock(return_value=None))
|
||||||
|
def test_bad_html_to_text():
|
||||||
|
bad_html = "one<br>two<br>three"
|
||||||
|
eq_(None, u.html_to_text(bad_html))
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(u, '_MAX_TAGS_COUNT', 3)
|
||||||
|
def test_html_too_big():
|
||||||
|
eq_(False, u.html_too_big("<div></div>"))
|
||||||
|
eq_(True, u.html_too_big("<div><span>Hi</span></div>"))
|
||||||
|
|
||||||
|
|
||||||
|
@patch.object(u, '_MAX_TAGS_COUNT', 3)
|
||||||
|
def test_html_to_text():
|
||||||
|
eq_("Hello", u.html_to_text("<div>Hello</div>"))
|
||||||
|
eq_(None, u.html_to_text("<div><span>Hi</span></div>"))
|
||||||
|
|||||||
1
train.py
1
train.py
@@ -1,3 +1,4 @@
|
|||||||
|
from __future__ import absolute_import
|
||||||
from talon.signature import EXTRACTOR_FILENAME, EXTRACTOR_DATA
|
from talon.signature import EXTRACTOR_FILENAME, EXTRACTOR_DATA
|
||||||
from talon.signature.learning.classifier import train, init
|
from talon.signature.learning.classifier import train, init
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user