1 Commit

Author:  Ralph Meijer
SHA1:    2377c387c7
Message: Actually bump up talon's version up to 1.0.5 to match the tag.
Date:    2015-09-09 22:46:18 +02:00
38 changed files with 352 additions and 1443 deletions

Dockerfile

@@ -1,20 +0,0 @@
FROM python:3.9-slim-buster AS deps
RUN apt-get update && \
apt-get install -y build-essential git curl python3-dev libatlas3-base libatlas-base-dev liblapack-dev libxml2 libxml2-dev libffi6 libffi-dev musl-dev libxslt-dev
FROM deps AS testable
ARG REPORT_PATH
VOLUME ["/var/mailgun", "/etc/mailgun/ssl", ${REPORT_PATH}]
ADD . /app
WORKDIR /app
COPY wheel/* /wheel/
RUN mkdir -p ${REPORT_PATH}
RUN python ./setup.py build bdist_wheel -d /wheel && \
pip install --no-deps /wheel/*
ENTRYPOINT ["/bin/sh", "/app/run_tests.sh"]

.gitignore vendored

@@ -39,8 +39,6 @@ nosetests.xml
/.emacs.desktop
/.emacs.desktop.lock
.elc
.idea
.cache
auto-save-list
tramp
.\#*
@@ -53,7 +51,4 @@ tramp
_trial_temp
# OSX
.DS_Store
# vim-backup
*.bak
.DS_Store

MANIFEST.in

@@ -1,14 +1,9 @@
recursive-include tests *
recursive-include talon *
recursive-exclude tests *.pyc *~
recursive-exclude talon *.pyc *~
include train.data
include classifier
include LICENSE
include MANIFEST.in
include README.rst
include talon/signature/data/train.data
include talon/signature/data/classifier
include talon/signature/data/classifier_01.npy
include talon/signature/data/classifier_02.npy
include talon/signature/data/classifier_03.npy
include talon/signature/data/classifier_04.npy
include talon/signature/data/classifier_05.npy
include README.rst

README.rst

@@ -95,7 +95,7 @@ classifiers. The core of machine learning algorithm lays in
apply to a message (``featurespace.py``), how data sets are built
(``dataset.py``), classifiers interface (``classifier.py``).
Currently the data used for training is taken from our personal email
The data used for training is taken from our personal email
conversations and from `ENRON`_ dataset. As a result of applying our set
of features to the dataset we provide files ``classifier`` and
``train.data`` that don't have any personal information but could be
@@ -116,35 +116,8 @@ or
from talon.signature.learning.classifier import train, init
train(init(), EXTRACTOR_DATA, EXTRACTOR_FILENAME)
Open-source Dataset
-------------------
Recently we started a `forge`_ project to create an open-source, annotated dataset of raw emails. In the project we
used a subset of `ENRON`_ data, cleansed of private, health and financial information by `EDRM`_. At the moment over 190
emails are annotated. Any contribution and collaboration on the project are welcome. Once the dataset is ready we plan to
start using it for talon.
.. _scikit-learn: http://scikit-learn.org
.. _ENRON: https://www.cs.cmu.edu/~enron/
.. _EDRM: http://www.edrm.net/resources/data-sets/edrm-enron-email-data-set
.. _forge: https://github.com/mailgun/forge
Training on your dataset
------------------------
talon comes with a pre-processed dataset and a pre-trained classifier. To retrain the classifier on your own dataset of raw emails, structure and annotate them in the same way the `forge`_ project does. Then do:
.. code:: python
from talon.signature.learning.dataset import build_extraction_dataset
from talon.signature.learning import classifier as c
build_extraction_dataset("/path/to/your/P/folder", "/path/to/talon/signature/data/train.data")
c.train(c.init(), "/path/to/talon/signature/data/train.data", "/path/to/talon/signature/data/classifier")
Note that for signature extraction you need just the folder with the positive samples with annotated signature lines (P folder).
.. _forge: https://github.com/mailgun/forge
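Put together, the full cycle described above comes down to a few calls (a minimal sketch, assuming the bundled ``classifier`` and ``train.data`` files ship with the package):

.. code:: python

    import talon
    from talon import signature

    talon.init()  # registers xpath extensions and loads the trained classifier

    text, sig = signature.extract("Wow. Awesome!\n--\nBob Smith",
                                  sender="bob@example.com")
    # sig should be "--\nBob Smith"; text is the body with it stripped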
Research
--------

requirements.txt

@@ -1,11 +0,0 @@
chardet>=1.0.1
# cchardet>=0.3.5
cssselect
html5lib
joblib
lxml>=2.3.3
numpy
regex>=1
scikit-learn>=1.0.0
scipy
six>=1.10.0

run_tests.sh

@@ -1,4 +0,0 @@
#!/usr/bin/env bash
set -ex
REPORT_PATH="${REPORT_PATH:-./}"
nosetests --with-xunit --with-coverage --cover-xml --cover-xml-file $REPORT_PATH/coverage.xml --xunit-file=$REPORT_PATH/nosetests.xml --cover-package=talon .

setup.py

@@ -1,64 +1,29 @@
from __future__ import absolute_import
from setuptools import setup, find_packages
from setuptools.command.install import install
class InstallCommand(install):
user_options = install.user_options + [
("no-ml", None, "Don't install without Machine Learning modules."),
]
boolean_options = install.boolean_options + ["no-ml"]
def initialize_options(self):
install.initialize_options(self)
self.no_ml = None
def finalize_options(self):
install.finalize_options(self)
if self.no_ml:
dist = self.distribution
dist.packages = find_packages(
exclude=[
"tests",
"tests.*",
"talon.signature",
"talon.signature.*",
]
)
for not_required in ["numpy", "scipy", "scikit-learn==0.24.1"]:
dist.install_requires.remove(not_required)
setup(
name="talon-o2w",
version="1.6.1",
description=(
"Mailgun library " "to extract message quotations and signatures."
),
long_description=open("README.rst").read(),
author="Mailgun Inc.",
author_email="admin@mailgunhq.com",
url="https://github.com/mailgun/talon",
license="APACHE2",
cmdclass={
"install": InstallCommand,
},
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
zip_safe=True,
install_requires=[
"lxml",
"regex",
"numpy",
"scipy",
"scikit-learn>=1.0.0",
"chardet",
# "cchardet",
"cssselect",
"six",
"html5lib",
"joblib",
],
tests_require=["mock", "nose", "coverage"],
)
setup(name='talon',
version='1.0.5',
description=("Mailgun library "
"to extract message quotations and signatures."),
long_description=open("README.rst").read(),
author='Mailgun Inc.',
author_email='admin@mailgunhq.com',
url='https://github.com/mailgun/talon',
license='APACHE2',
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
"lxml==2.3.3",
"regex>=1",
"html2text",
"numpy",
"scipy",
"scikit-learn==0.16.1", # pickled versions of classifier, else rebuild
],
tests_require=[
"mock",
"nose>=1.2.1",
"coverage"
]
)

talon/__init__.py

@@ -1,13 +1,7 @@
from __future__ import absolute_import
from talon.quotations import register_xpath_extensions
try:
from talon import signature
ML_ENABLED = True
except ImportError:
ML_ENABLED = False
from talon import signature
def init():
register_xpath_extensions()
if ML_ENABLED:
signature.initialize()
signature.initialize()

talon/constants.py

@@ -1,4 +1,3 @@
from __future__ import absolute_import
import regex as re

talon/html_quotations.py

@@ -3,10 +3,8 @@ The module's functions operate on message bodies trying to extract original
messages (without quoted messages) from html
"""
from __future__ import absolute_import
import regex as re
from talon.utils import cssselect
CHECKPOINT_PREFIX = '#!%!'
CHECKPOINT_SUFFIX = '!%!#'
@@ -14,7 +12,6 @@ CHECKPOINT_PATTERN = re.compile(CHECKPOINT_PREFIX + '\d+' + CHECKPOINT_SUFFIX)
# HTML quote indicators (tag ids)
QUOTE_IDS = ['OLK_SRC_BODY_SECTION']
RE_FWD = re.compile("^[-]+[ ]*Forwarded message[ ]*[-]+$", re.I | re.M)
def add_checkpoint(html_note, counter):
@@ -79,32 +76,22 @@ def delete_quotation_tags(html_note, counter, quotation_checkpoints):
def cut_gmail_quote(html_message):
''' Cuts the outermost block element with class gmail_quote. '''
gmail_quote = cssselect('div.gmail_quote', html_message)
if gmail_quote and (gmail_quote[0].text is None or not RE_FWD.match(gmail_quote[0].text)):
gmail_quote = html_message.cssselect('.gmail_quote')
if gmail_quote:
gmail_quote[0].getparent().remove(gmail_quote[0])
return True
def cut_microsoft_quote(html_message):
''' Cuts splitter block and all following blocks. '''
#use EXSLT extensions to have a regex match() function with lxml
ns = {"re": "http://exslt.org/regular-expressions"}
#general pattern: @style='border:none;border-top:solid <color> 1.0pt;padding:3.0pt 0<unit> 0<unit> 0<unit>'
#outlook 2007, 2010 (international) <color=#B5C4DF> <unit=cm>
#outlook 2007, 2010 (american) <color=#B5C4DF> <unit=pt>
#outlook 2013 (international) <color=#E1E1E1> <unit=cm>
#outlook 2013 (american) <color=#E1E1E1> <unit=pt>
#also handles a variant with a space after the semicolon
splitter = html_message.xpath(
#outlook 2007, 2010, 2013 (international, american)
"//div[@style[re:match(., 'border:none; ?border-top:solid #(E1E1E1|B5C4DF) 1.0pt; ?"
"padding:3.0pt 0(in|cm) 0(in|cm) 0(in|cm)')]]|"
#outlook 2007, 2010
"//div[@style='border:none;border-top:solid #B5C4DF 1.0pt;"
"padding:3.0pt 0cm 0cm 0cm']|"
#windows mail
"//div[@style='padding-top: 5px; "
"border-top-color: rgb(229, 229, 229); "
"border-top-width: 1px; border-top-style: solid;']"
, namespaces=ns
)
if splitter:
@@ -143,7 +130,7 @@ def cut_microsoft_quote(html_message):
def cut_by_id(html_message):
found = False
for quote_id in QUOTE_IDS:
quote = cssselect('#{}'.format(quote_id), html_message)
quote = html_message.cssselect('#{}'.format(quote_id))
if quote:
found = True
quote[0].getparent().remove(quote[0])
@@ -151,14 +138,9 @@ def cut_by_id(html_message):
def cut_blockquote(html_message):
''' Cuts the last non-nested blockquote with wrapping elements.'''
quote = html_message.xpath(
'(.//blockquote)'
'[not(@class="gmail_quote") and not(ancestor::blockquote)]'
'[last()]')
if quote:
quote = quote[0]
''' Cuts blockquote with wrapping elements. '''
quote = html_message.find('.//blockquote')
if quote is not None:
quote.getparent().remove(quote)
return True
@@ -172,58 +154,21 @@ def cut_from_block(html_message):
if block:
block = block[-1]
parent_div = None
while block.getparent() is not None:
if block.tag == 'div':
parent_div = block
break
block = block.getparent()
if parent_div is not None:
maybe_body = parent_div.getparent()
# In cases where removing this enclosing div will remove all
# content, we should assume the quote is not enclosed in a tag.
parent_div_is_all_content = (
maybe_body is not None and maybe_body.tag == 'body' and
len(maybe_body.getchildren()) == 1)
if not parent_div_is_all_content:
parent = block.getparent()
next_sibling = block.getnext()
# remove all tags after found From block
# (From block and quoted message are in separate divs)
while next_sibling is not None:
parent.remove(block)
block = next_sibling
next_sibling = block.getnext()
# remove the last sibling (or the
# From block if no siblings)
if block is not None:
parent.remove(block)
block.getparent().remove(block)
return True
else:
return False
# handle the case when From: block goes right after e.g. <hr>
# and not enclosed in some tag
block = html_message.xpath(
("//*[starts-with(mg:tail(), 'From:')]|"
"//*[starts-with(mg:tail(), 'Date:')]"))
if block:
block = block[0]
if RE_FWD.match(block.getparent().text or ''):
return False
while(block.getnext() is not None):
block.getparent().remove(block.getnext())
block.getparent().remove(block)
return True
def cut_zimbra_quote(html_message):
zDivider = html_message.xpath('//hr[@data-marker="__DIVIDER__"]')
if zDivider:
zDivider[0].getparent().remove(zDivider[0])
return True
else:
block = block.getparent()
else:
# handle the case when From: block goes right after e.g. <hr>
# and not enclosed in some tag
block = html_message.xpath(
("//*[starts-with(mg:tail(), 'From:')]|"
"//*[starts-with(mg:tail(), 'Date:')]"))
if block:
block = block[0]
while(block.getnext() is not None):
block.getparent().remove(block.getnext())
block.getparent().remove(block)
return True
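A minimal sketch of one of the cutters above in isolation (assumes lxml plus the cssselect package; the markup is illustrative):

    from lxml import html
    from talon import html_quotations

    tree = html.fromstring(
        '<div>Reply<div class="gmail_quote">On ... Bob wrote: quoted</div></div>')
    html_quotations.cut_gmail_quote(tree)  # drops the quote div, returns True
    print(html.tostring(tree))             # b'<div>Reply</div>'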

talon/quotations.py

@@ -5,26 +5,24 @@ The module's functions operate on message bodies trying to extract
original messages (without quoted messages)
"""
from __future__ import absolute_import
import regex as re
import logging
from copy import deepcopy
import regex as re
from lxml import etree, html
from six.moves import range
from lxml import html, etree
import html2text
from talon.utils import get_delimiter
from talon import html_quotations
from talon.utils import (get_delimiter, html_document_fromstring,
html_tree_to_text)
log = logging.getLogger(__name__)
RE_FWD = re.compile("^[-]+[ ]*Forwarded message[ ]*[-]+\s*$", re.I | re.M)
RE_FWD = re.compile("^[-]+[ ]*Forwarded message[ ]*[-]+$", re.I | re.M)
RE_ON_DATE_SMB_WROTE = re.compile(
u'(-*[>]?[ ]?({0})[ ].*({1})(.*\n){{0,2}}.*({2}):?-*)'.format(
u'(-*[ ]?({0})[ ].*({1})(.*\n){{0,2}}.*({2}):?-*)'.format(
# Beginning of the line
u'|'.join((
# English
@@ -34,17 +32,7 @@ RE_ON_DATE_SMB_WROTE = re.compile(
# Polish
'W dniu',
# Dutch
'Op',
# German
'Am',
# Portuguese
'Em',
# Norwegian
u'På',
# Swedish, Danish
'Den',
# Vietnamese
u'Vào',
'Op'
)),
# Date and sender separator
u'|'.join((
@@ -62,38 +50,24 @@ RE_ON_DATE_SMB_WROTE = re.compile(
# Polish
u'napisał',
# Dutch
'schreef','verzond','geschreven',
# German
'schrieb',
# Portuguese
'escreveu',
# Norwegian, Swedish
'skrev',
# Vietnamese
u'đã viết',
'schreef','verzond','geschreven'
))
))
# Special case for languages where text is translated like this: 'on {date} wrote {somebody}:'
RE_ON_DATE_WROTE_SMB = re.compile(
u'(-*[>]?[ ]?({0})[ ].*(.*\n){{0,2}}.*({1})[ ]*.*:)'.format(
u'(-*[ ]?({0})[ ].*(.*\n){{0,2}}.*({1})[ ].*:)'.format(
# Beginning of the line
u'|'.join((
'Op',
#German
'Am'
)),
# Ending of the line
u'|'.join((
# Dutch
'schreef','verzond','geschreven',
# German
'schrieb'
'schreef','verzond','geschreven'
))
)
)
RE_QUOTATION = re.compile(
r"""
r'''
(
# quotation border: splitter line or a number of quotation marker lines
(?:
@@ -111,20 +85,20 @@ RE_QUOTATION = re.compile(
# after quotations should be text only or nothing at all
[te]*$
""", re.VERBOSE)
''', re.VERBOSE)
RE_EMPTY_QUOTATION = re.compile(
r"""
r'''
(
# quotation border: splitter line or a number of quotation marker lines
(?:
(?:se*)+
s
|
(?:me*){2,}
)
)
e*
""", re.VERBOSE)
''', re.VERBOSE)
# ------Original Message------ or ---- Reply Message ----
# With variations in other languages.
@@ -138,67 +112,37 @@ RE_ORIGINAL_MESSAGE = re.compile(u'[\s]*[-]+[ ]*({})[ ]*[-]+'.format(
'Oprindelig meddelelse',
))), re.I)
RE_FROM_COLON_OR_DATE_COLON = re.compile(u'((_+\r?\n)?[\s]*:?[*]?({})[\s]?:([^\n$]+\n){{1,2}}){{2,}}'.format(
RE_FROM_COLON_OR_DATE_COLON = re.compile(u'(_+\r?\n)?[\s]*(:?[*]?{})[\s]?:[*]? .*'.format(
u'|'.join((
# "From" in different languages.
'From', 'Van', 'De', 'Von', 'Fra', u'Från',
'From', 'Van', 'De', 'Von', 'Fra',
# "Date" in different languages.
'Date', '[S]ent', 'Datum', u'Envoyé', 'Skickat', 'Sendt', 'Gesendet',
# "Subject" in different languages.
'Subject', 'Betreff', 'Objet', 'Emne', u'Ämne',
# "To" in different languages.
'To', 'An', 'Til', u'À', 'Till'
))), re.I | re.M)
# ---- John Smith wrote ----
RE_ANDROID_WROTE = re.compile(u'[\s]*[-]+.*({})[ ]*[-]+'.format(
u'|'.join((
# English
'wrote',
'Date', 'Datum', u'Envoyé'
))), re.I)
# Support polymail.io reply format
# On Tue, Apr 11, 2017 at 10:07 PM John Smith
#
# <
# mailto:John Smith <johnsmith@gmail.com>
# > wrote:
RE_POLYMAIL = re.compile('On.*\s{2}<\smailto:.*\s> wrote:', re.I)
SPLITTER_PATTERNS = [
RE_ORIGINAL_MESSAGE,
# <date> <person>
re.compile("(\d+/\d+/\d+|\d+\.\d+\.\d+).*@", re.VERBOSE),
RE_ON_DATE_SMB_WROTE,
RE_ON_DATE_WROTE_SMB,
RE_FROM_COLON_OR_DATE_COLON,
# 02.04.2012 14:20 пользователь "bob@example.com" <
# bob@xxx.mailgun.org> написал:
re.compile("(\d+/\d+/\d+|\d+\.\d+\.\d+).*\s\S+@\S+", re.S),
# 2014-10-17 11:28 GMT+03:00 Bob <
# bob@example.com>:
re.compile("\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}\s+GMT.*\s\S+@\S+", re.S),
# Thu, 26 Jun 2014 14:00:51 +0400 Bob <bob@example.com>:
re.compile('\S{3,10}, \d\d? \S{3,10} 20\d\d,? \d\d?:\d\d(:\d\d)?'
'( \S+){3,6}@\S+:'),
# Sent from Samsung MobileName <address@example.com> wrote:
re.compile('Sent from Samsung.* \S+@\S+> wrote'),
RE_ANDROID_WROTE,
RE_POLYMAIL
'( \S+){3,6}@\S+:')
]
RE_LINK = re.compile('<(http://[^>]*)>')
RE_NORMALIZED_LINK = re.compile('@@(http://[^>@]*)@@')
RE_PARENTHESIS_LINK = re.compile("\(https?://")
SPLITTER_MAX_LINES = 6
SPLITTER_MAX_LINES = 4
MAX_LINES_COUNT = 1000
QUOT_PATTERN = re.compile('^>+ ?')
NO_QUOT_LINE = re.compile('^[^>].*[\S].*')
# Regular expression to identify if a line is a header.
RE_HEADER = re.compile(": ")
def extract_from(msg_body, content_type='text/plain'):
try:
@@ -212,19 +156,6 @@ def extract_from(msg_body, content_type='text/plain'):
return msg_body
def remove_initial_spaces_and_mark_message_lines(lines):
"""
Removes the initial spaces in each line before marking message lines.
This ensures headers can be identified if they are indented with spaces.
"""
i = 0
while i < len(lines):
lines[i] = lines[i].lstrip(' ')
i += 1
return mark_message_lines(lines)
def mark_message_lines(lines):
"""Mark message lines with markers to distinguish quotation lines.
@@ -238,7 +169,7 @@ def mark_message_lines(lines):
>>> mark_message_lines(['answer', 'From: foo@bar.com', '', '> question'])
'tsem'
"""
markers = ['e' for _ in lines]
markers = bytearray(len(lines))
i = 0
while i < len(lines):
if not lines[i].strip():
@@ -250,11 +181,10 @@ def mark_message_lines(lines):
else:
# in case splitter is spread across several lines
splitter = is_splitter('\n'.join(lines[i:i + SPLITTER_MAX_LINES]))
if splitter:
# append as many splitter markers as lines in splitter
splitter_lines = splitter.group().splitlines()
for j in range(len(splitter_lines)):
for j in xrange(len(splitter_lines)):
markers[i + j] = 's'
# skip splitter lines
@@ -264,7 +194,7 @@ def mark_message_lines(lines):
markers[i] = 't'
i += 1
return ''.join(markers)
return markers
def process_marked_lines(lines, markers, return_flags=[False, -1, -1]):
@@ -278,7 +208,6 @@ def process_marked_lines(lines, markers, return_flags=[False, -1, -1]):
return_flags = [were_lines_deleted, first_deleted_line,
last_deleted_line]
"""
markers = ''.join(markers)
# if there are no splitter there should be no markers
if 's' not in markers and not re.search('(me*){3}', markers):
markers = markers.replace('m', 't')
@@ -290,7 +219,7 @@ def process_marked_lines(lines, markers, return_flags=[False, -1, -1]):
# inlined reply
# use lookbehind assertions to find overlapping entries e.g. for 'mtmtm'
# both 't' entries should be found
for inline_reply in re.finditer('(?<=m)e*(t[te]*)m', markers):
for inline_reply in re.finditer('(?<=m)e*((?:t+e*)+)m', markers):
# long links could break sequence of quotation lines but they shouldn't
# be considered an inline reply
links = (
@@ -324,24 +253,10 @@ def preprocess(msg_body, delimiter, content_type='text/plain'):
Replaces link brackets so that they couldn't be taken for quotation marker.
Splits line in two if splitter pattern preceded by some text on the same
line (done only for 'On <date> <person> wrote:' pattern).
Converts msg_body into a unicode.
"""
msg_body = _replace_link_brackets(msg_body)
msg_body = _wrap_splitter_with_newline(msg_body, delimiter, content_type)
return msg_body
def _replace_link_brackets(msg_body):
"""
Normalize links i.e. replace '<', '>' wrapping the link with some symbols
so that '>' closing the link couldn't be mistakenly taken for quotation
marker.
Converts msg_body into a unicode
"""
# normalize links i.e. replace '<', '>' wrapping the link with some symbols
# so that '>' closing the link couldn't be mistakenly taken for quotation
# marker.
def link_wrapper(link):
newline_index = msg_body[:link.start()].rfind("\n")
if msg_body[newline_index + 1] == ">":
@@ -350,14 +265,7 @@ def _replace_link_brackets(msg_body):
return "@@%s@@" % link.group(1)
msg_body = re.sub(RE_LINK, link_wrapper, msg_body)
return msg_body
def _wrap_splitter_with_newline(msg_body, delimiter, content_type='text/plain'):
"""
Splits line in two if splitter pattern preceded by some text on the same
line (done only for 'On <date> <person> wrote:' pattern.
"""
def splitter_wrapper(splitter):
"""Wraps splitter with new line"""
if splitter.start() and msg_body[splitter.start() - 1] != '\n':
@@ -381,10 +289,16 @@ def postprocess(msg_body):
def extract_from_plain(msg_body):
"""Extracts a non quoted message from provided plain text."""
stripped_text = msg_body
delimiter = get_delimiter(msg_body)
msg_body = preprocess(msg_body, delimiter)
lines = msg_body.splitlines()
# don't process too long messages
lines = msg_body.splitlines()[:MAX_LINES_COUNT]
if len(lines) > MAX_LINES_COUNT:
return stripped_text
markers = mark_message_lines(lines)
lines = process_marked_lines(lines, markers)
@@ -409,60 +323,49 @@ def extract_from_html(msg_body):
then extracting quotations from text,
then checking deleted checkpoints,
then deleting necessary tags.
Returns a unicode string.
"""
if msg_body.strip() == "":
if msg_body.strip() == '':
return msg_body
msg_body = msg_body.replace("\r\n", "\n")
# Cut out xml and doctype tags to avoid conflict with unicode decoding.
msg_body = re.sub(r"\<\?xml.+\?\>|\<\!DOCTYPE.+]\>", "", msg_body)
html_tree = html_document_fromstring(msg_body)
if html_tree is None:
return msg_body
html_tree = html.document_fromstring(
msg_body,
parser=html.HTMLParser(encoding="utf-8")
)
result = extract_from_html_tree(html_tree)
if not result:
return msg_body
return result
def extract_from_html_tree(html_tree):
"""
Extract not quoted message from provided parsed html tree using tags and
plain text algorithm.
Cut out the 'blockquote', 'gmail_quote' tags.
Cut Microsoft quotations.
Then use plain text algorithm to cut out splitter or
leftover quotation.
This works by adding checkpoint text to all html tags,
then converting html to text,
then extracting quotations from text,
then checking deleted checkpoints,
then deleting necessary tags.
"""
cut_quotations = (html_quotations.cut_gmail_quote(html_tree) or
html_quotations.cut_zimbra_quote(html_tree) or
html_quotations.cut_blockquote(html_tree) or
html_quotations.cut_microsoft_quote(html_tree) or
html_quotations.cut_by_id(html_tree) or
html_quotations.cut_from_block(html_tree)
)
html_tree_copy = deepcopy(html_tree)
number_of_checkpoints = html_quotations.add_checkpoint(html_tree, 0)
quotation_checkpoints = [False] * number_of_checkpoints
plain_text = html_tree_to_text(html_tree)
plain_text = preprocess(plain_text, '\n', content_type='text/html')
msg_with_checkpoints = html.tostring(html_tree)
h = html2text.HTML2Text()
h.body_width = 0 # generate plain text without wrap
# html2text adds unnecessary star symbols. Remove them.
# Mask star symbols
msg_with_checkpoints = msg_with_checkpoints.replace('*', '3423oorkg432')
plain_text = h.handle(msg_with_checkpoints)
# Remove created star symbols
plain_text = plain_text.replace('*', '')
# Unmask saved star symbols
plain_text = plain_text.replace('3423oorkg432', '*')
delimiter = get_delimiter(plain_text)
plain_text = preprocess(plain_text, delimiter, content_type='text/html')
lines = plain_text.splitlines()
# Don't process too long messages
if len(lines) > MAX_LINES_COUNT:
return None
return msg_body
# Collect checkpoints on each line
line_checkpoints = [
@@ -480,174 +383,30 @@ def extract_from_html_tree(html_tree):
process_marked_lines(lines, markers, return_flags)
lines_were_deleted, first_deleted, last_deleted = return_flags
if not lines_were_deleted and not cut_quotations:
return None
if lines_were_deleted:
#collect checkpoints from deleted lines
for i in range(first_deleted, last_deleted):
for i in xrange(first_deleted, last_deleted):
for checkpoint in line_checkpoints[i]:
quotation_checkpoints[checkpoint] = True
else:
if cut_quotations:
return html.tostring(html_tree_copy)
else:
return msg_body
# Remove tags with quotation checkpoints
html_quotations.delete_quotation_tags(
html_tree_copy, 0, quotation_checkpoints
)
# Remove tags with quotation checkpoints
html_quotations.delete_quotation_tags(
html_tree_copy, 0, quotation_checkpoints
)
if _readable_text_empty(html_tree_copy):
return None
# NOTE: We remove_namespaces() because we are using an HTML5 Parser, HTML
# parsers do not recognize namespaces in HTML tags. As such the rendered
# HTML tags are no longer recognizable HTML tags. Example: <o:p> becomes
# <oU0003Ap>. When we port this to golang we should look into using an
# XML Parser NOT an HTML5 Parser since we do not know what input a
# customer will send us. Switching to a common XML parser in python
# opens us up to a host of vulnerabilities.
# See https://docs.python.org/3/library/xml.html#xml-vulnerabilities
#
# The down sides to removing the namespaces is that customers might
# judge the XML namespaces important. If that is the case then support
# should encourage customers to perform XML parsing of the un-stripped
# body to get the full unmodified XML payload.
#
# Alternatives to this approach are
# 1. Ignore the U0003A in tag names and let the customer deal with it.
# This is not ideal, as most customers use stripped-html for viewing
# emails sent from a recipient, as such they cannot control the HTML
# provided by a recipient.
# 2. Perform a string replace of 'U0003A' to ':' on the rendered HTML
# string. While this would solve the issue simply, it runs the risk
# of replacing data outside the <tag> which might be essential to
# the customer.
remove_namespaces(html_tree_copy)
s = html.tostring(html_tree_copy, encoding="ascii")
if not s:
return None
return s.decode("ascii")
def remove_namespaces(root):
"""
Given the root of an HTML document iterate through all the elements
and remove any namespaces that might have been provided and remove
any attributes that contain a namespace
<html xmlns:o="urn:schemas-microsoft-com:office:office">
becomes
<html>
<o:p>Hi</o:p>
becomes
<p>Hi</p>
Start tags do NOT have a namespace; COLON characters have no special meaning.
if we don't remove the namespace the parser translates the tag name into a
unicode representation. For example <o:p> becomes <oU0003Ap>
See https://www.w3.org/TR/2011/WD-html5-20110525/syntax.html#start-tags
"""
for child in root.iter():
for key, value in child.attrib.items():
# If the attribute includes a colon
if key.rfind("U0003A") != -1:
child.attrib.pop(key)
# If the tag includes a colon
idx = child.tag.rfind("U0003A")
if idx != -1:
child.tag = child.tag[idx+6:]
return root
def split_emails(msg):
"""
Given a message (which may consist of an email conversation thread with
multiple emails), mark the lines to identify split lines, content lines and
empty lines.
Correct the split line markers inside header blocks. Header blocks are
identified by the regular expression RE_HEADER.
Return the corrected markers
"""
msg_body = _replace_link_brackets(msg)
# don't process too long messages
lines = msg_body.splitlines()[:MAX_LINES_COUNT]
markers = remove_initial_spaces_and_mark_message_lines(lines)
markers = _mark_quoted_email_splitlines(markers, lines)
# we don't want splitlines in header blocks
markers = _correct_splitlines_in_headers(markers, lines)
return markers
def _mark_quoted_email_splitlines(markers, lines):
"""
When there are headers indented with '>' characters, this method will
attempt to identify if the header is a splitline header. If it is, then we
mark it with 's' instead of leaving it as 'm' and return the new markers.
"""
# Create a list of markers to easily alter specific characters
markerlist = list(markers)
for i, line in enumerate(lines):
if markerlist[i] != 'm':
continue
for pattern in SPLITTER_PATTERNS:
matcher = re.search(pattern, line)
if matcher:
markerlist[i] = 's'
break
return "".join(markerlist)
def _correct_splitlines_in_headers(markers, lines):
"""
Corrects markers by removing splitlines deemed to be inside header blocks.
"""
updated_markers = ""
i = 0
in_header_block = False
for m in markers:
# Only set in_header_block flag when we hit an 's' and line is a header
if m == 's':
if not in_header_block:
if bool(re.search(RE_HEADER, lines[i])):
in_header_block = True
else:
if QUOT_PATTERN.match(lines[i]):
m = 'm'
else:
m = 't'
# If the line is not a header line, set in_header_block false.
if not bool(re.search(RE_HEADER, lines[i])):
in_header_block = False
# Add the marker to the new updated markers string.
updated_markers += m
i += 1
return updated_markers
def _readable_text_empty(html_tree):
return not bool(html_tree_to_text(html_tree).strip())
return html.tostring(html_tree_copy)
def is_splitter(line):
"""
'''
Returns Matcher object if provided string is a splitter and
None otherwise.
"""
'''
for pattern in SPLITTER_PATTERNS:
matcher = re.match(pattern, line)
if matcher:
@@ -655,12 +414,12 @@ def is_splitter(line):
def text_content(context):
"""XPath Extension function to return a node text content."""
return context.context_node.xpath("string()").strip()
'''XPath Extension function to return a node text content.'''
return context.context_node.text_content().strip()
def tail(context):
"""XPath Extension function to return a node tail text."""
'''XPath Extension function to return a node tail text.'''
return context.context_node.tail or ''
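For orientation, the plain-text path above can be exercised directly (a sketch; per extract_from(), the original body is returned if anything goes wrong):

    from talon import quotations

    msg = ("Reply\n\n"
           "On 11-Apr-2011, at 6:54 PM, Bob <bob@example.com> wrote:\n\n"
           "> Hi")
    quotations.extract_from(msg, content_type='text/plain')  # -> "Reply"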

talon/signature/__init__.py

@@ -20,17 +20,19 @@ trained against, don't forget to regenerate:
* signature/data/classifier
"""
from __future__ import absolute_import
import os
from talon.signature import extraction
from talon.signature.extraction import extract
from talon.signature.learning import classifier
from . import extraction
from . extraction import extract #noqa
from . learning import classifier
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
EXTRACTOR_FILENAME = os.path.join(DATA_DIR, 'classifier')
EXTRACTOR_DATA = os.path.join(DATA_DIR, 'train.data')
def initialize():
data_dir = os.path.join(os.path.dirname(__file__), 'data')
extractor_filename = os.path.join(data_dir, 'classifier')
extractor_data_filename = os.path.join(data_dir, 'train.data')
extraction.EXTRACTOR = classifier.load(extractor_filename,
extractor_data_filename)
extraction.EXTRACTOR = classifier.load(EXTRACTOR_FILENAME,
EXTRACTOR_DATA)

talon/signature/bruteforce.py

@@ -1,15 +1,14 @@
from __future__ import absolute_import
import logging
import regex as re
from talon.utils import get_delimiter
from talon.signature.constants import (SIGNATURE_MAX_LINES,
TOO_LONG_SIGNATURE_LINE)
from talon.utils import get_delimiter
log = logging.getLogger(__name__)
# regex to fetch signature based on common signature words
RE_SIGNATURE = re.compile(r'''
(
@@ -28,6 +27,7 @@ RE_SIGNATURE = re.compile(r'''
)
''', re.I | re.X | re.M | re.S)
# signatures appended by phone email clients
RE_PHONE_SIGNATURE = re.compile(r'''
(
@@ -44,6 +44,7 @@ RE_PHONE_SIGNATURE = re.compile(r'''
)
''', re.I | re.X | re.M | re.S)
# see _mark_candidate_indexes() for details
# c - could be signature line
# d - line starts with dashes (could be signature or list item)
@@ -110,7 +111,7 @@ def extract_signature(msg_body):
return (stripped_body.strip(),
signature.strip())
except Exception:
except Exception, e:
log.exception('ERROR extracting signature')
return (msg_body, None)
@@ -161,7 +162,7 @@ def _mark_candidate_indexes(lines, candidate):
'cdc'
"""
# at first consider everything to be potential signature lines
markers = list('c' * len(candidate))
markers = bytearray('c'*len(candidate))
# mark lines starting from bottom up
for i, line_idx in reversed(list(enumerate(candidate))):
@@ -172,7 +173,7 @@ def _mark_candidate_indexes(lines, candidate):
if line.startswith('-') and line.strip("-"):
markers[i] = 'd'
return "".join(markers)
return markers
def _process_marked_candidate_indexes(candidate, markers):
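The brute-force extractor above is driven like this (sketch of the typical call):

    from talon.signature import bruteforce

    text, sig = bruteforce.extract_signature("Wow. Awesome!\n--\nBob Smith")
    # -> ("Wow. Awesome!", "--\nBob Smith")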


@@ -1 +0,0 @@

Binary file not shown.

talon/signature/extraction.py

@@ -1,15 +1,15 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import numpy
import regex as re
from talon.signature.bruteforce import get_signature_candidate
import numpy
from talon.signature.learning.featurespace import features, build_pattern
from talon.signature.learning.helpers import has_signature
from talon.utils import get_delimiter
from talon.signature.bruteforce import get_signature_candidate
from talon.signature.learning.helpers import has_signature
log = logging.getLogger(__name__)
@@ -32,7 +32,7 @@ RE_REVERSE_SIGNATURE = re.compile(r'''
def is_signature_line(line, sender, classifier):
'''Checks if the line belongs to signature. Returns True or False.'''
data = numpy.array(build_pattern(line, features(sender))).reshape(1, -1)
data = numpy.array(build_pattern(line, features(sender)))
return classifier.predict(data) > 0
@@ -57,7 +57,7 @@ def extract(body, sender):
text = delimiter.join(text)
if text.strip():
return (text, delimiter.join(signature))
except Exception as e:
except Exception:
log.exception('ERROR when extracting signature with classifiers')
return (body, None)
@@ -80,7 +80,7 @@ def _mark_lines(lines, sender):
candidate = get_signature_candidate(lines)
# at first consider everything to be text no signature
markers = list('t' * len(lines))
markers = bytearray('t'*len(lines))
# mark lines starting from bottom up
# mark only lines that belong to candidate
@@ -95,7 +95,7 @@ def _mark_lines(lines, sender):
elif is_signature_line(line, sender, EXTRACTOR):
markers[j] = 's'
return "".join(markers)
return markers
def _process_marked_lines(lines, markers):
@@ -110,4 +110,3 @@ def _process_marked_lines(lines, markers):
return (lines[:-signature.end()], lines[-signature.end():])
return (lines, None)

talon/signature/learning/classifier.py

@@ -5,11 +5,9 @@ The classifier could be used to detect if a certain line of the message
body belongs to the signature.
"""
from __future__ import absolute_import
from numpy import genfromtxt
import joblib
from sklearn.svm import LinearSVC
from sklearn.externals import joblib
def init():
@@ -30,40 +28,4 @@ def train(classifier, train_data_filename, save_classifier_filename=None):
def load(saved_classifier_filename, train_data_filename):
"""Loads saved classifier. """
try:
return joblib.load(saved_classifier_filename)
except Exception:
import sys
if sys.version_info > (3, 0):
return load_compat(saved_classifier_filename)
raise
def load_compat(saved_classifier_filename):
import os
import pickle
import tempfile
# we need to switch to the data path to properly load the related _xx.npy files
cwd = os.getcwd()
os.chdir(os.path.dirname(saved_classifier_filename))
# convert encoding using pick.load and write to temp file which we'll tell joblib to use
pickle_file = open(saved_classifier_filename, 'rb')
classifier = pickle.load(pickle_file, encoding='latin1')
try:
# save our conversion if permissions allow
joblib.dump(classifier, saved_classifier_filename)
except Exception:
# can't write to classifier, use a temp file
tmp = tempfile.SpooledTemporaryFile()
joblib.dump(classifier, tmp)
saved_classifier_filename = tmp
# important, use joblib.load before switching back to original cwd
jb_classifier = joblib.load(saved_classifier_filename)
os.chdir(cwd)
return jb_classifier
return joblib.load(saved_classifier_filename)
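A sketch of the train/load cycle these helpers implement (file paths are illustrative):

    from talon.signature.learning.classifier import init, train, load

    train(init(), "train.data", "classifier")  # fit the SVM and persist it
    clf = load("classifier", "train.data")     # joblib.load() under the hood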

talon/signature/learning/dataset.py

@@ -16,16 +16,13 @@ suffix and the corresponding sender file has the same name except for the
suffix which should be `_sender`.
"""
from __future__ import absolute_import
import os
import regex as re
from six.moves import range
from talon.signature.constants import SIGNATURE_MAX_LINES
from talon.signature.learning.featurespace import build_pattern, features
SENDER_SUFFIX = '_sender'
BODY_SUFFIX = '_body'
@@ -58,14 +55,9 @@ def parse_msg_sender(filename, sender_known=True):
algorithm:
>>> parse_msg_sender(filename, False)
"""
import sys
kwargs = {}
if sys.version_info > (3, 0):
kwargs["encoding"] = "utf8"
sender, msg = None, None
if os.path.isfile(filename) and not is_sender_filename(filename):
with open(filename, **kwargs) as f:
with open(filename) as f:
msg = f.read()
sender = u''
if sender_known:
@@ -152,8 +144,8 @@ def build_extraction_dataset(folder, dataset_filename,
if not sender or not msg:
continue
lines = msg.splitlines()
for i in range(1, min(SIGNATURE_MAX_LINES,
len(lines)) + 1):
for i in xrange(1, min(SIGNATURE_MAX_LINES,
len(lines)) + 1):
line = lines[-i]
label = -1
if line[:len(SIGNATURE_ANNOTATION)] == \

talon/signature/learning/featurespace.py

@@ -7,12 +7,9 @@ The body and the message sender string are converted into unicode before
applying features to them.
"""
from __future__ import absolute_import
from talon.signature.constants import (SIGNATURE_MAX_LINES,
TOO_LONG_SIGNATURE_LINE)
from talon.signature.learning.helpers import *
from six.moves import zip
from functools import reduce
def features(sender=''):

talon/signature/learning/helpers.py

@@ -5,17 +5,20 @@
* regexp's constants used when evaluating signature's features
"""
import unicodedata
import unicodedata
import regex as re
from talon.utils import to_unicode
from talon.signature.constants import SIGNATURE_MAX_LINES
rc = re.compile
RE_EMAIL = rc('\S@\S')
RE_EMAIL = rc('@')
RE_RELAX_PHONE = rc('(\(? ?[\d]{2,3} ?\)?.{,3}?){2,}')
RE_URL = rc(r"""https?://|www\.[\S]+\.[\S]""")
RE_URL = rc(r'''https?://|www\.[\S]+\.[\S]''')
# Taken from:
# http://www.cs.cmu.edu/~vitor/papers/sigFilePaper_finalversion.pdf
@@ -51,7 +54,7 @@ BAD_SENDER_NAMES = [
def binary_regex_search(prog):
"""Returns a function that returns 1 or 0 depending on regex search result.
'''Returns a function that returns 1 or 0 depending on regex search result.
If regular expression compiled into prog is present in a string
the result of calling the returned function with the string will be 1
@@ -62,12 +65,12 @@ def binary_regex_search(prog):
1
>>> binary_regex_search(re.compile("12"))("34")
0
"""
'''
return lambda s: 1 if prog.search(s) else 0
def binary_regex_match(prog):
"""Returns a function that returns 1 or 0 depending on regex match result.
'''Returns a function that returns 1 or 0 depending on regex match result.
If a string matches regular expression compiled into prog
the result of calling the returned function with the string will be 1
@@ -78,7 +81,7 @@ def binary_regex_match(prog):
1
>>> binary_regex_match(re.compile("12"))("3 12")
0
"""
'''
return lambda s: 1 if prog.match(s) else 0
@@ -98,7 +101,7 @@ def flatten_list(list_to_flatten):
def contains_sender_names(sender):
"""Returns a functions to search sender\'s name or it\'s part.
'''Returns a functions to search sender\'s name or it\'s part.
>>> feature = contains_sender_names("Sergey N. Obukhov <xxx@example.com>")
>>> feature("Sergey Obukhov")
@@ -111,13 +114,13 @@ def contains_sender_names(sender):
1
>>> contains_sender_names("<serobnic@mail.ru>")("serobnic")
1
"""
'''
names = '( |$)|'.join(flatten_list([[e, e.capitalize()]
for e in extract_names(sender)]))
names = names or sender
if names != '':
return binary_regex_search(re.compile(names))
return lambda s: 0
return lambda s: False
def extract_names(sender):
@@ -131,25 +134,20 @@ def extract_names(sender):
>>> extract_names('')
[]
"""
sender = to_unicode(sender)
# Remove non-alphabetical characters
sender = "".join([char if char.isalpha() else ' ' for char in sender])
# Remove too short words and words from "black" list i.e.
# words like `ru`, `gmail`, `com`, `org`, etc.
names = list()
for word in sender.split():
if len(word) < 2:
continue
if word in BAD_SENDER_NAMES:
continue
if word in names:
continue
names.append(word)
sender = [word for word in sender.split() if len(word) > 1 and
not word in BAD_SENDER_NAMES]
# Remove duplicates
names = list(set(sender))
return names
def categories_percent(s, categories):
"""Returns category characters percent.
'''Returns category characters percent.
>>> categories_percent("qqq ggg hhh", ["Po"])
0.0
@@ -161,8 +159,9 @@ def categories_percent(s, categories):
50.0
>>> categories_percent("s.s,5s", ["Po", "Nd"])
50.0
"""
'''
count = 0
s = to_unicode(s)
for c in s:
if unicodedata.category(c) in categories:
count += 1
@@ -170,27 +169,27 @@ def categories_percent(s, categories):
def punctuation_percent(s):
"""Returns punctuation percent.
'''Returns punctuation percent.
>>> punctuation_percent("qqq ggg hhh")
0.0
>>> punctuation_percent("q,w.")
50.0
"""
'''
return categories_percent(s, ['Po'])
def capitalized_words_percent(s):
"""Returns capitalized words percent."""
'''Returns capitalized words percent.'''
s = to_unicode(s)
words = re.split('\s', s)
words = [w for w in words if w.strip()]
words = [w for w in words if len(w) > 2]
capitalized_words_counter = 0
valid_words_counter = 0
for word in words:
if not INVALID_WORD_START.match(word):
valid_words_counter += 1
if word[0].isupper() and not word[1].isupper():
if word[0].isupper():
capitalized_words_counter += 1
if valid_words_counter > 0 and len(words) > 1:
return 100 * float(capitalized_words_counter) / valid_words_counter
@@ -207,26 +206,20 @@ def many_capitalized_words(s):
def has_signature(body, sender):
"""Checks if the body has signature. Returns True or False."""
'''Checks if the body has signature. Returns True or False.'''
non_empty = [line for line in body.splitlines() if line.strip()]
candidate = non_empty[-SIGNATURE_MAX_LINES:]
upvotes = 0
sender_check = contains_sender_names(sender)
for line in candidate:
# we check lines for sender's name, phone, email and url,
# those signature lines don't take more then 27 lines
if len(line.strip()) > 27:
continue
if sender_check(line):
elif contains_sender_names(sender)(line):
return True
if (binary_regex_search(RE_RELAX_PHONE)(line) +
binary_regex_search(RE_EMAIL)(line) +
binary_regex_search(RE_URL)(line) == 1):
elif (binary_regex_search(RE_RELAX_PHONE)(line) +
binary_regex_search(RE_EMAIL)(line) +
binary_regex_search(RE_URL)(line) == 1):
upvotes += 1
if upvotes > 1:
return True
return False
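Expected behaviour of the top-level helper, as a sketch (assumes the sender's name appears among the trailing lines):

    from talon.signature.learning.helpers import has_signature

    has_signature("Thanks!\n\nBob Smith\n+1 555 123 4567",
                  "Bob Smith <bob@example.com>")  # should return True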

talon/utils.py

@@ -1,17 +1,72 @@
# coding:utf-8
from __future__ import annotations
import html5lib
import regex as re
from html5lib import HTMLParser
from lxml.cssselect import CSSSelector
from lxml.etree import _Element
from lxml.html import html5parser
import logging
from random import shuffle
from talon.constants import RE_DELIMITER
def get_delimiter(msg_body: str) -> str:
log = logging.getLogger(__name__)
def safe_format(format_string, *args, **kwargs):
"""
Helper: formats string with any combination of bytestrings/unicode
strings without raising exceptions
"""
try:
if not args and not kwargs:
return format_string
else:
return format_string.format(*args, **kwargs)
# catch encoding errors and transform everything into utf-8 string
# before logging:
except (UnicodeEncodeError, UnicodeDecodeError):
format_string = to_utf8(format_string)
args = [to_utf8(p) for p in args]
kwargs = {k: to_utf8(v) for k, v in kwargs.iteritems()}
return format_string.format(*args, **kwargs)
# ignore other errors
except:
return u''
def to_unicode(str_or_unicode, precise=False):
"""
Safely returns a unicode version of a given string
>>> utils.to_unicode('привет')
u'привет'
>>> utils.to_unicode(u'привет')
u'привет'
If `precise` flag is True, tries to guess the correct encoding first.
"""
encoding = detect_encoding(str_or_unicode) if precise else 'utf-8'
if isinstance(str_or_unicode, str):
return unicode(str_or_unicode, encoding, 'replace')
return str_or_unicode
def to_utf8(str_or_unicode):
"""
Safely returns a UTF-8 version of a given string
>>> utils.to_utf8(u'hi')
'hi'
"""
if isinstance(str_or_unicode, unicode):
return str_or_unicode.encode("utf-8", "ignore")
return str(str_or_unicode)
def random_token(length=7):
vals = ("a b c d e f g h i j k l m n o p q r s t u v w x y z "
"0 1 2 3 4 5 6 7 8 9").split(' ')
shuffle(vals)
return ''.join(vals[:length])
def get_delimiter(msg_body):
delimiter = RE_DELIMITER.search(msg_body)
if delimiter:
delimiter = delimiter.group()
@@ -19,117 +74,3 @@ def get_delimiter(msg_body: str) -> str:
delimiter = '\n'
return delimiter
def html_tree_to_text(tree: _Element) -> str:
for style in CSSSelector('style')(tree):
style.getparent().remove(style)
for c in tree.xpath('//comment()'):
parent = c.getparent()
# comment with no parent does not impact produced text
if parent is None:
continue
parent.remove(c)
text = ""
for el in tree.iter():
el_text = (el.text or '') + (el.tail or '')
if len(el_text) > 1:
if el.tag in _BLOCKTAGS + _HARDBREAKS:
text += "\n"
if el.tag == 'li':
text += " * "
text += el_text.strip() + " "
# add href to the output
href = el.attrib.get('href')
if href:
text += "(%s) " % href
if (el.tag in _HARDBREAKS and text and
not text.endswith("\n") and not el_text):
text += "\n"
text = _rm_excessive_newlines(text)
return text
def html_to_text(s: str) -> str | None:
"""
Dead-simple HTML-to-text converter:
>>> html_to_text("one<br>two<br>three")
<<< "one\ntwo\nthree"
NOTES:
1. the string is expected to contain UTF-8 encoded HTML!
3. if html can't be parsed returns None
"""
s = _prepend_utf8_declaration(s)
s = s.replace("\n", "")
tree = html_fromstring(s)
if tree is None:
return None
return html_tree_to_text(tree)
def html_fromstring(s: str) -> _Element:
"""Parse html tree from string. Return None if the string can't be parsed.
"""
return html5parser.fromstring(s, parser=_html5lib_parser())
def html_document_fromstring(s: str) -> _Element:
"""Parse html tree from string. Return None if the string can't be parsed.
"""
return html5parser.document_fromstring(s, parser=_html5lib_parser())
def cssselect(expr: str, tree: str) -> list[_Element]:
return CSSSelector(expr)(tree)
def _contains_charset_spec(s: str) -> str:
"""Return True if the first 4KB contain charset spec
"""
return s.lower().find('html; charset=', 0, 4096) != -1
def _prepend_utf8_declaration(s: str) -> str:
"""Prepend 'utf-8' encoding declaration if the first 4KB don't have any
"""
return s if _contains_charset_spec(s) else _UTF8_DECLARATION + s
def _rm_excessive_newlines(s: str) -> str:
"""Remove excessive newlines that often happen due to tons of divs
"""
return _RE_EXCESSIVE_NEWLINES.sub("\n\n", s).strip()
def _html5lib_parser() -> HTMLParser:
"""
html5lib is a pure-python library that conforms to the WHATWG HTML spec
and is not vulnerable to certain attacks common for XML libraries
"""
return HTMLParser(
# build lxml tree
html5lib.treebuilders.getTreeBuilder("lxml"),
# remove namespace value from inside lxml.html.html5paser element tag
# otherwise it yields something like "{http://www.w3.org/1999/xhtml}div"
# instead of "div", throwing the algo off
namespaceHTMLElements=False
)
_UTF8_DECLARATION = ('<meta http-equiv="Content-Type" content="text/html;'
'charset=utf-8">')
_BLOCKTAGS = ['div', 'p', 'ul', 'li', 'h1', 'h2', 'h3']
_HARDBREAKS = ['br', 'hr', 'tr']
_RE_EXCESSIVE_NEWLINES = re.compile("\n{2,10}")
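get_delimiter() is the one helper both extractors lean on; a sketch of its behaviour (assuming RE_DELIMITER from talon.constants matches '\r?\n'):

    from talon.utils import get_delimiter

    get_delimiter("one\r\ntwo")  # '\r\n'
    get_delimiter("no newline")  # falls back to '\n'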

test_requirements.txt

@@ -1,3 +0,0 @@
coverage
mock
nose>=1.2.1

tests/__init__.py

@@ -1,4 +1,3 @@
from __future__ import absolute_import
from nose.tools import *
from mock import *

tests/fixtures/html_replies/ms_outlook_2007.html

@@ -1,4 +1,3 @@
<?xml version="1.0" encoding="UTF-8"?>
<html>
<head>
<style><!--

tests/fixtures/html_replies/ms_outlook_2010.html

@@ -1,87 +0,0 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-2022-jp">
<meta name="Generator" content="Microsoft Word 14 (filtered medium)">
<style><!--
/* Font Definitions */
@font-face
{font-family:Calibri;
panose-1:2 15 5 2 2 2 4 3 2 4;}
@font-face
{font-family:Tahoma;
panose-1:2 11 6 4 3 5 4 4 2 4;}
/* Style Definitions */
p.MsoNormal, li.MsoNormal, div.MsoNormal
{margin:0in;
margin-bottom:.0001pt;
font-size:12.0pt;
font-family:"Times New Roman","serif";}
h3
{mso-style-priority:9;
mso-style-link:"Heading 3 Char";
mso-margin-top-alt:auto;
margin-right:0in;
mso-margin-bottom-alt:auto;
margin-left:0in;
font-size:13.5pt;
font-family:"Times New Roman","serif";
font-weight:bold;}
a:link, span.MsoHyperlink
{mso-style-priority:99;
color:blue;
text-decoration:underline;}
a:visited, span.MsoHyperlinkFollowed
{mso-style-priority:99;
color:purple;
text-decoration:underline;}
p
{mso-style-priority:99;
mso-margin-top-alt:auto;
margin-right:0in;
mso-margin-bottom-alt:auto;
margin-left:0in;
font-size:12.0pt;
font-family:"Times New Roman","serif";}
span.Heading3Char
{mso-style-name:"Heading 3 Char";
mso-style-priority:9;
mso-style-link:"Heading 3";
font-family:"Cambria","serif";
color:#4F81BD;
font-weight:bold;}
span.EmailStyle19
{mso-style-type:personal-reply;
font-family:"Calibri","sans-serif";
color:#1F497D;}
.MsoChpDefault
{mso-style-type:export-only;
font-family:"Calibri","sans-serif";}
@page WordSection1
{size:8.5in 11.0in;
margin:1.0in 1.0in 1.0in 1.0in;}
div.WordSection1
{page:WordSection1;}
--></style><!--[if gte mso 9]><xml>
<o:shapedefaults v:ext="edit" spidmax="1026" />
</xml><![endif]--><!--[if gte mso 9]><xml>
<o:shapelayout v:ext="edit">
<o:idmap v:ext="edit" data="1" />
</o:shapelayout></xml><![endif]-->
</head>
<body lang="EN-US" link="blue" vlink="purple">
<div class="WordSection1">
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Hi. I am fine.<o:p></o:p></span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Thanks,<o:p></o:p></span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:&quot;Calibri&quot;,&quot;sans-serif&quot;;color:#1F497D">Alex<o:p></o:p></span></p>
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;">From:</span></b><span style="font-size:10.0pt;font-family:&quot;Tahoma&quot;,&quot;sans-serif&quot;"> Foo [mailto:foo@bar.com]
<b>On Behalf Of </b>baz@bar.com<br>
<b>Sent:</b> Monday, January 01, 2000 12:00 AM<br>
<b>To:</b> john@bar.com<br>
<b>Cc:</b> jane@bar.io<br>
<b>Subject:</b> Conversation<o:p></o:p></span></p>
<p class="MsoNormal"><o:p>&nbsp;</o:p></p>
<p>Hello! How are you?<o:p></o:p></p>
<p class="MsoNormal"><o:p>&nbsp;</o:p></p>
</div>
</body>
</html>


@@ -1,19 +0,0 @@
Content-Type: text/plain;
charset=us-ascii
Mime-Version: 1.0 (Mac OS X Mail 8.2 \(2104\))
Subject: Re: Hello there
X-Universally-Unique-Identifier: 85B1075D-5841-46A9-8565-FCB287A93AC4
From: Adam Renberg <adam@tictail.com>
In-Reply-To: <CABzQGhkMXDxUt_tSVQcg=43aniUhtsVfCZVzu-PG0kwS_uzqMw@mail.gmail.com>
Date: Sat, 22 Aug 2015 19:22:20 +0200
Content-Transfer-Encoding: 7bit
X-Smtp-Server: smtp.gmail.com:adam@tictail.com
Message-Id: <68001B29-8EA4-444C-A894-0537D2CA5208@tictail.com>
References: <CABzQGhkMXDxUt_tSVQcg=43aniUhtsVfCZVzu-PG0kwS_uzqMw@mail.gmail.com>
To: Adam Renberg <tgwizard@gmail.com>
Hello
> On 22 Aug 2015, at 19:21, Adam Renberg <tgwizard@gmail.com> wrote:
>
> Hi there!

tests/html_quotations_test.py

@@ -1,20 +1,17 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from . import *
from . fixtures import *
# noinspection PyUnresolvedReferences
import re
from unittest.mock import Mock, patch
import regex as re
from nose.tools import assert_false, assert_true, eq_, ok_
from talon import quotations
from tests.fixtures import (OLK_SRC_BODY_SECTION,
REPLY_QUOTATIONS_SHARE_BLOCK,
REPLY_SEPARATED_BY_HR)
from talon import quotations, utils as u
import html2text
RE_WHITESPACE = re.compile(r"\s")
RE_DOUBLE_WHITESPACE = re.compile(r"\s")
RE_WHITESPACE = re.compile("\s")
RE_DOUBLE_WHITESPACE = re.compile("\s")
def test_quotation_splitter_inside_blockquote():
@@ -31,7 +28,7 @@ def test_quotation_splitter_inside_blockquote():
</blockquote>"""
eq_("<html><head></head><body>Reply</body></html>",
eq_("<html><body><p>Reply</p></body></html>",
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
@@ -48,25 +45,7 @@ def test_quotation_splitter_outside_blockquote():
</div>
</blockquote>
"""
eq_("<html><head></head><body>Reply</body></html>",
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
def test_regular_blockquote():
msg_body = """Reply
<blockquote>Regular</blockquote>
<div>
On 11-Apr-2011, at 6:54 PM, Bob &lt;bob@example.com&gt; wrote:
</div>
<blockquote>
<div>
<blockquote>Nested</blockquote>
</div>
</blockquote>
"""
eq_("<html><head></head><body>Reply<blockquote>Regular</blockquote></body></html>",
eq_("<html><body><p>Reply</p><div></div></body></html>",
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
@@ -89,7 +68,6 @@ Reply
reply = """
<html>
<head></head>
<body>
Reply
@@ -133,30 +111,7 @@ def test_gmail_quote():
</div>
</div>
</div>"""
eq_("<html><head></head><body>Reply</body></html>",
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
def test_gmail_quote_compact():
msg_body = 'Reply' \
'<div class="gmail_quote">' \
'<div class="gmail_quote">On 11-Apr-2011, at 6:54 PM, Bob &lt;bob@example.com&gt; wrote:' \
'<div>Test</div>' \
'</div>' \
'</div>'
eq_("<html><head></head><body>Reply</body></html>",
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
def test_gmail_quote_blockquote():
msg_body = """Message
<blockquote class="gmail_quote">
<div class="gmail_default">
My name is William Shakespeare.
<br/>
</div>
</blockquote>"""
eq_(RE_WHITESPACE.sub('', msg_body),
eq_("<html><body><p>Reply</p></body></html>",
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
@@ -167,11 +122,11 @@ def test_unicode_in_reply():
<br>
</div>
<blockquote>
<blockquote class="gmail_quote">
Quote
</blockquote>"""
</blockquote>""".encode("utf-8")
eq_("<html><head></head><body>Reply&#160;&#160;Text<br><div><br></div>"
eq_("<html><body><p>Reply&#160;&#160;Text<br></p><div><br></div>"
"</body></html>",
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
@@ -197,7 +152,6 @@ def test_blockquote_disclaimer():
stripped_html = """
<html>
<head></head>
<body>
<div>
<div>
@@ -229,7 +183,7 @@ def test_date_block():
</div>
</div>
"""
eq_('<html><head></head><body><div>message<br></div></body></html>',
eq_('<html><body><div>message<br></div></body></html>',
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
@@ -246,7 +200,7 @@ Subject: You Have New Mail From Mary!<br><br>
text
</div></div>
"""
eq_('<html><head></head><body><div>message<br></div></body></html>',
eq_('<html><body><div>message<br></div></body></html>',
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
@@ -264,7 +218,7 @@ def test_reply_shares_div_with_from_block():
</div>
</body>'''
eq_('<html><head></head><body><div>Blah<br><br></div></body></html>',
eq_('<html><body><div>Blah<br><br></div></body></html>',
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
@@ -275,51 +229,37 @@ def test_reply_quotations_share_block():
def test_OLK_SRC_BODY_SECTION_stripped():
eq_('<html><head></head><body><div>Reply</div></body></html>',
eq_('<html><body><div>Reply</div></body></html>',
RE_WHITESPACE.sub(
'', quotations.extract_from_html(OLK_SRC_BODY_SECTION)))
def test_reply_separated_by_hr():
eq_('<html><head></head><body><div>Hi<div>there</div></div></body></html>',
eq_('<html><body><div>Hi<div>there</div></div></body></html>',
RE_WHITESPACE.sub(
'', quotations.extract_from_html(REPLY_SEPARATED_BY_HR)))
def test_from_block_and_quotations_in_separate_divs():
msg_body = '''
Reply
<div>
<hr/>
<div>
<font>
<b>From: bob@example.com</b>
<b>Date: Thu, 24 Mar 2016 08:07:12 -0700</b>
</font>
</div>
<div>
Quoted message
</div>
</div>
'''
eq_('<html><head></head><body>Reply<div><hr></div></body></html>',
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
RE_REPLY = re.compile(r"^Hi\. I am fine\.\s*\n\s*Thanks,\s*\n\s*Alex\s*$")
def extract_reply_and_check(filename):
import sys
kwargs = {}
if sys.version_info > (3, 0):
kwargs["encoding"] = "utf8"
f = open(filename)
f = open(filename, **kwargs)
msg_body = f.read()
msg_body = f.read().decode("utf-8")
reply = quotations.extract_from_html(msg_body)
plain_reply = u.html_to_text(reply)
eq_(RE_WHITESPACE.sub('', "Hi. I am fine.\n\nThanks,\nAlex"),
RE_WHITESPACE.sub('', plain_reply))
h = html2text.HTML2Text()
h.body_width = 0
plain_reply = h.handle(reply)
#remove &nbsp; spaces
plain_reply = plain_reply.replace(u'\xa0', u' ')
if RE_REPLY.match(plain_reply):
eq_(1, 1)
else:
eq_("Hi. I am fine.\n\nThanks,\nAlex", plain_reply)
def test_gmail_reply():
@@ -342,10 +282,6 @@ def test_ms_outlook_2007_reply():
extract_reply_and_check("tests/fixtures/html_replies/ms_outlook_2007.html")
def test_ms_outlook_2010_reply():
extract_reply_and_check("tests/fixtures/html_replies/ms_outlook_2010.html")
def test_thunderbird_reply():
extract_reply_and_check("tests/fixtures/html_replies/thunderbird.html")
@@ -356,82 +292,3 @@ def test_windows_mail_reply():
def test_yandex_ru_reply():
extract_reply_and_check("tests/fixtures/html_replies/yandex_ru.html")
def test_CRLF():
"""CR is not converted to '&#13;'
"""
symbol = '&#13;'
extracted = quotations.extract_from_html('<html>\r\n</html>')
assert_false(symbol in extracted)
eq_('<html></html>', RE_WHITESPACE.sub('', extracted))
msg_body = """My
reply
<blockquote>
<div>
On 11-Apr-2011, at 6:54 PM, Bob &lt;bob@example.com&gt; wrote:
</div>
<div>
Test
</div>
</blockquote>"""
msg_body = msg_body.replace('\n', '\r\n')
extracted = quotations.extract_from_html(msg_body)
assert_false(symbol in extracted)
# Keep new lines otherwise "My reply" becomes one word - "Myreply"
eq_("<html><head></head><body>My\nreply\n</body></html>", extracted)
def test_gmail_forwarded_msg():
msg_body = """<div dir="ltr"><br><div class="gmail_quote">---------- Forwarded message ----------<br>From: <b class="gmail_sendername">Bob</b> <span dir="ltr">&lt;<a href="mailto:bob@example.com">bob@example.com</a>&gt;</span><br>Date: Fri, Feb 11, 2010 at 5:59 PM<br>Subject: Bob WFH today<br>To: Mary &lt;<a href="mailto:mary@example.com">mary@example.com</a>&gt;<br><br><br><div dir="ltr">eom</div>
</div><br></div>"""
extracted = quotations.extract_from_html(msg_body)
eq_(RE_WHITESPACE.sub('', msg_body), RE_WHITESPACE.sub('', extracted))
def test_readable_html_empty():
msg_body = """
<blockquote>
Reply
<div>
On 11-Apr-2011, at 6:54 PM, Bob &lt;bob@example.com&gt; wrote:
</div>
<div>
Test
</div>
</blockquote>"""
eq_(RE_WHITESPACE.sub('', msg_body),
RE_WHITESPACE.sub('', quotations.extract_from_html(msg_body)))
@patch.object(quotations, 'html_document_fromstring', Mock(return_value=None))
def test_bad_html():
bad_html = "<html></html>"
eq_(bad_html, quotations.extract_from_html(bad_html))
def test_remove_namespaces():
msg_body = """
<html xmlns:o="urn:schemas-microsoft-com:office:office" xmlns="http://www.w3.org/TR/REC-html40">
<body>
<o:p>Dear Sir,</o:p>
<o:p>Thank you for the email.</o:p>
<blockquote>thing</blockquote>
</body>
</html>
"""
rendered = quotations.extract_from_html(msg_body)
assert_true("<p>" in rendered)
assert_true("xmlns" in rendered)
assert_true("<o:p>" not in rendered)
assert_true("<xmlns:o>" not in rendered)


@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from . import *
from . fixtures import *


@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .. import *
from talon.signature import bruteforce


@@ -1,15 +1,13 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .. import *
import os
from six.moves import range
from talon.signature import bruteforce, extraction, extract
from talon.signature import extraction as e
from talon.signature.learning import dataset
from .. import *
from talon import signature
from talon.signature import extraction as e
from talon.signature import bruteforce
def test_message_shorter_SIGNATURE_MAX_LINES():
@@ -18,28 +16,23 @@ def test_message_shorter_SIGNATURE_MAX_LINES():
Thanks in advance,
Bob"""
text, extracted_signature = extract(body, sender)
text, extracted_signature = signature.extract(body, sender)
eq_('\n'.join(body.splitlines()[:2]), text)
eq_('\n'.join(body.splitlines()[-2:]), extracted_signature)
def test_messages_longer_SIGNATURE_MAX_LINES():
import sys
kwargs = {}
if sys.version_info > (3, 0):
kwargs["encoding"] = "utf8"
for filename in os.listdir(STRIPPED):
filename = os.path.join(STRIPPED, filename)
if not filename.endswith('_body'):
continue
sender, body = dataset.parse_msg_sender(filename)
text, extracted_signature = extract(body, sender)
text, extracted_signature = signature.extract(body, sender)
extracted_signature = extracted_signature or ''
with open(filename[:-len('body')] + 'signature', **kwargs) as ms:
with open(filename[:-len('body')] + 'signature') as ms:
msg_signature = ms.read()
eq_(msg_signature.strip(), extracted_signature.strip())
stripped_msg = body.strip()[:len(body.strip()) - len(msg_signature)]
stripped_msg = body.strip()[:len(body.strip())-len(msg_signature)]
eq_(stripped_msg.strip(), text.strip())
@@ -52,7 +45,7 @@ Thanks in advance,
some text which doesn't seem to be a signature at all
Bob"""
text, extracted_signature = extract(body, sender)
text, extracted_signature = signature.extract(body, sender)
eq_('\n'.join(body.splitlines()[:2]), text)
eq_('\n'.join(body.splitlines()[-3:]), extracted_signature)
@@ -65,7 +58,7 @@ Thanks in advance,
some long text here which doesn't seem to be a signature at all
Bob"""
text, extracted_signature = extract(body, sender)
text, extracted_signature = signature.extract(body, sender)
eq_('\n'.join(body.splitlines()[:-1]), text)
eq_('Bob', extracted_signature)
@@ -73,38 +66,13 @@ Bob"""
some *long* text here which doesn't seem to be a signature at all
"""
((body, None), extract(body, "david@example.com"))
((body, None), signature.extract(body, "david@example.com"))
def test_basic():
msg_body = 'Blah\r\n--\r\n\r\nSergey Obukhov'
eq_(('Blah', '--\r\n\r\nSergey Obukhov'),
extract(msg_body, 'Sergey'))
def test_capitalized():
msg_body = """Hi Mary,
Do you still need a DJ for your wedding? I've included a video demo of one of our DJs available for your wedding date.
DJ Doe
http://example.com
Password: SUPERPASSWORD
Would you like to check out more?
At your service,
John Smith
Doe Inc
555-531-7967"""
sig = """John Smith
Doe Inc
555-531-7967"""
eq_(sig, extract(msg_body, 'Doe')[1])
signature.extract(msg_body, 'Sergey'))
def test_over_2_text_lines_after_signature():
@@ -115,25 +83,25 @@ def test_over_2_text_lines_after_signature():
2 non signature lines in the end
It's not signature
"""
text, extracted_signature = extract(body, "Bob")
text, extracted_signature = signature.extract(body, "Bob")
eq_(extracted_signature, None)
def test_no_signature():
sender, body = "bob@foo.bar", "Hello"
eq_((body, None), extract(body, sender))
eq_((body, None), signature.extract(body, sender))
def test_handles_unicode():
sender, body = dataset.parse_msg_sender(UNICODE_MSG)
text, extracted_signature = extract(body, sender)
text, extracted_signature = signature.extract(body, sender)
@patch.object(extraction, 'has_signature')
@patch.object(signature.extraction, 'has_signature')
def test_signature_extract_crash(has_signature):
has_signature.side_effect = Exception('Bam!')
msg_body = u'Blah\r\n--\r\n\r\nСергей'
eq_((msg_body, None), extract(msg_body, 'Сергей'))
eq_((msg_body, None), signature.extract(msg_body, 'Сергей'))
def test_mark_lines():
@@ -142,37 +110,37 @@ def test_mark_lines():
# (starting from the bottom) because we don't count empty lines
eq_('ttset',
e._mark_lines(['Bob Smith',
'Bob Smith',
'Bob Smith',
'',
'some text'], 'Bob Smith'))
'Bob Smith',
'Bob Smith',
'',
'some text'], 'Bob Smith'))
with patch.object(bruteforce, 'SIGNATURE_MAX_LINES', 3):
# we don't analyse the 1st line because
# signature can't start from the 1st line
eq_('tset',
e._mark_lines(['Bob Smith',
'Bob Smith',
'',
'some text'], 'Bob Smith'))
'Bob Smith',
'',
'some text'], 'Bob Smith'))
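Reading the assertions, _mark_lines condenses a message into one marker per line: 't' for text, 's' for a signature-candidate line, 'e' for an empty line, and the first line is never analysed. A sketch of calling the (private) helper the same way the tests do:

.. code:: python

    import talon
    from talon.signature import extraction as e

    talon.init()  # loads the bundled classifier (assumed prerequisite for marking)
    markers = e._mark_lines(['Hello', '', 'Thanks,', 'Bob Smith'], 'Bob Smith')
    print(markers)  # a string over {'t', 's', 'e'}, one character per line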
def test_process_marked_lines():
# no signature found
eq_((list(range(5)), None), e._process_marked_lines(list(range(5)), 'telt'))
eq_((range(5), None), e._process_marked_lines(range(5), 'telt'))
# signature in the middle of the text
eq_((list(range(9)), None), e._process_marked_lines(list(range(9)), 'tesestelt'))
eq_((range(9), None), e._process_marked_lines(range(9), 'tesestelt'))
# long line splits signature
eq_((list(range(7)), [7, 8]),
e._process_marked_lines(list(range(9)), 'tsslsless'))
eq_((range(7), [7, 8]),
e._process_marked_lines(range(9), 'tsslsless'))
eq_((list(range(20)), [20]),
e._process_marked_lines(list(range(21)), 'ttttttstttesllelelets'))
eq_((range(20), [20]),
e._process_marked_lines(range(21), 'ttttttstttesllelelets'))
# some signature lines could be identified as text
eq_(([0], list(range(1, 9))), e._process_marked_lines(list(range(9)), 'tsetetest'))
eq_(([0], range(1, 9)), e._process_marked_lines(range(9), 'tsetetest'))
eq_(([], list(range(5))),
e._process_marked_lines(list(range(5)), "ststt"))
eq_(([], range(5)),
e._process_marked_lines(range(5), "ststt"))
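Each pair of assertions above differs only in wrapping range(...) in list(...): on Python 3, range is a lazy sequence object and never compares equal to a list, so expected values must be materialized.

.. code:: python

    # Python 3 semantics behind the change:
    assert range(5) != [0, 1, 2, 3, 4]        # a range object is not a list
    assert list(range(5)) == [0, 1, 2, 3, 4]  # materialized, comparisons work again
    # (on Python 2, range(5) simply returned the list itself)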


@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ... import *
import os


@@ -1,15 +1,12 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ... import *
from talon.signature.learning import featurespace as fs
def test_apply_features():
s = '''This is John Doe
Tuesday @3pm suits. I'll chat to you then.
s = '''John Doe
VP Research and Development, Xxxx Xxxx Xxxxx
@@ -22,12 +19,11 @@ john@example.com'''
# note that we don't consider the first line because signatures don't
# usually take up all the text; empty lines are not considered
eq_(result, [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
with patch.object(fs, 'SIGNATURE_MAX_LINES', 5):
with patch.object(fs, 'SIGNATURE_MAX_LINES', 4):
features = fs.features(sender)
new_result = fs.apply_features(s, features)
# result remains the same because we don't consider empty lines
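For reference, the shape of the featurespace API exercised above: features(sender) builds the per-line feature functions, and apply_features(body, features) maps them over the message's last lines, yielding one 0/1 vector per analysed line. A sketch using only what the test shows:

.. code:: python

    from talon.signature.learning import featurespace as fs

    body = "Thanks,\n\nJohn Doe\nVP Research and Development\njohn@example.com"
    features = fs.features('John Doe <john@example.com>')
    print(fs.apply_features(body, features))  # e.g. [[1, 0, ...], ...], one row per line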


@@ -1,13 +1,11 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from ... import *
import regex as re
from talon.signature.learning import helpers as h
from talon.signature.learning.helpers import *
from six.moves import range
# First testing regex constants.
VALID = '''
@@ -156,7 +154,7 @@ def test_extract_names():
# check that extracted names could be compiled
try:
re.compile("|".join(extracted_names))
except Exception as e:
except Exception, e:
ok_(False, ("Failed to compile extracted names {}"
"\n\nReason: {}").format(extracted_names, e))
if expected_names:
@@ -192,11 +190,10 @@ def test_punctuation_percent(categories_percent):
def test_capitalized_words_percent():
eq_(0.0, h.capitalized_words_percent(''))
eq_(100.0, h.capitalized_words_percent('Example Corp'))
eq_(50.0, h.capitalized_words_percent('Qqq qqq Aqs 123 sss'))
eq_(50.0, h.capitalized_words_percent('Qqq qqq QQQ 123 sss'))
eq_(100.0, h.capitalized_words_percent('Cell 713-444-7368'))
eq_(100.0, h.capitalized_words_percent('8th Floor'))
eq_(0.0, h.capitalized_words_percent('(212) 230-9276'))
eq_(50.0, h.capitalized_words_percent('Password: REMARKABLE'))
def test_has_signature():
@@ -207,7 +204,7 @@ def test_has_signature():
'sender@example.com'))
assert_false(h.has_signature('http://www.example.com/555-555-5555',
'sender@example.com'))
long_line = ''.join(['q' for e in range(28)])
long_line = ''.join(['q' for e in xrange(28)])
assert_false(h.has_signature(long_line + ' sender', 'sender@example.com'))
# won't crash on an empty string
assert_false(h.has_signature('', ''))
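A quick usage sketch for the helpers exercised above, with signatures taken from the tests themselves (capitalized_words_percent(text), has_signature(body, sender)):

.. code:: python

    from talon.signature.learning import helpers as h

    print(h.capitalized_words_percent('Example Corp'))  # 100.0
    # Expected truthy for a signature-like line (my expectation, not asserted above):
    print(h.has_signature('John Doe, 555-555-5555', 'john.doe@example.com'))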


@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from . import *
from . fixtures import *
@@ -8,19 +7,16 @@ import os
import email.iterators
from talon import quotations
import six
from six.moves import range
from six import StringIO
@patch.object(quotations, 'MAX_LINES_COUNT', 1)
def test_too_many_lines():
msg_body = """Test reply
Hi
-----Original Message-----
Test"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_wrote():
@@ -28,32 +24,6 @@ def test_pattern_on_date_somebody_wrote():
On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> wrote:
>
> Test
>
> Roman"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_polymail():
msg_body = """Test reply
On Tue, Apr 11, 2017 at 10:07 PM John Smith
<
mailto:John Smith <johnsmith@gmail.com>
> wrote:
Test quoted data
"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_sent_from_samsung_smb_wrote():
msg_body = """Test reply
Sent from Samsung MobileName <address@example.com> wrote:
>
> Test
>
@@ -67,7 +37,7 @@ def test_pattern_on_date_wrote_somebody():
"""Lorem
Op 13-02-2014 3:18 schreef Julius Caesar <pantheon@rome.com>:
Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse.
"""))
@@ -84,18 +54,6 @@ On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_date_time_email_splitter():
msg_body = """Test reply
2014-10-17 11:28 GMT+03:00 Postmaster <
postmaster@sandboxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.mailgun.org>:
> First from site
>
"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_wrote_allows_space_in_front():
msg_body = """Thanks Thanmai
On Mar 8, 2012 9:59 AM, "Example.com" <
@@ -119,38 +77,6 @@ On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> sent:
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_appointment():
msg_body = """Response
10/19/2017 @ 9:30 am for physical therapy
Bla
1517 4th Avenue Ste 300
London CA 19129, 555-421-6780
John Doe, FCLS
Mailgun Inc
555-941-0697
From: from@example.com [mailto:from@example.com]
Sent: Wednesday, October 18, 2017 2:05 PM
To: John Doer - SIU <jd@example.com>
Subject: RE: Claim # 5551188-1
Text"""
expected = """Response
10/19/2017 @ 9:30 am for physical therapy
Bla
1517 4th Avenue Ste 300
London CA 19129, 555-421-6780
John Doe, FCLS
Mailgun Inc
555-941-0697"""
eq_(expected, quotations.extract_from_plain(msg_body))
def test_line_starts_with_on():
msg_body = """Blah-blah-blah
On blah-blah-blah"""
@@ -187,8 +113,7 @@ def _check_pattern_original_message(original_message_indicator):
-----{}-----
Test"""
eq_('Test reply', quotations.extract_from_plain(
msg_body.format(six.text_type(original_message_indicator))))
eq_('Test reply', quotations.extract_from_plain(msg_body.format(unicode(original_message_indicator))))
def test_english_original_message():
_check_pattern_original_message('Original Message')
@@ -211,17 +136,6 @@ Test reply"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_android_wrote():
msg_body = """Test reply
---- John Smith wrote ----
> quoted
> text
"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_reply_wraps_quotations():
msg_body = """Test reply
@@ -301,7 +215,7 @@ def test_with_indent():
------On 12/29/1987 17:32 PM, Julius Caesar wrote-----
Brunch mumblecore pug Marfa tofu, irure taxidermy hoodie readymade pariatur.
Brunch mumblecore pug Marfa tofu, irure taxidermy hoodie readymade pariatur.
"""
eq_("YOLO salvia cillum kogi typewriter mumblecore cardigan skateboard Austin.", quotations.extract_from_plain(msg_body))
@@ -397,50 +311,15 @@ Emne: The manager has commented on your Loop
Blah-blah-blah
"""))
def test_swedish_from_block():
eq_('Allo! Follow up MIME!', quotations.extract_from_plain(
u"""Allo! Follow up MIME!
Från: Anno Sportel [mailto:anno.spoel@hsbcssad.com]
Skickat: den 26 augusti 2015 14:45
Till: Isacson Leiff
Ämne: RE: Week 36
Blah-blah-blah
"""))
def test_swedish_from_line():
eq_('Lorem', quotations.extract_from_plain(
"""Lorem
Den 14 september, 2015 02:23:18, Valentino Rudy (valentino@rudy.be) skrev:
Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse.
"""))
def test_norwegian_from_line():
eq_('Lorem', quotations.extract_from_plain(
u"""Lorem
På 14 september 2015 på 02:23:18, Valentino Rudy (valentino@rudy.be) skrev:
Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse.
"""))
def test_dutch_from_block():
eq_('Gluten-free culpa lo-fi et nesciunt nostrud.', quotations.extract_from_plain(
"""Gluten-free culpa lo-fi et nesciunt nostrud.
"""Gluten-free culpa lo-fi et nesciunt nostrud.
Op 17-feb.-2015, om 13:18 heeft Julius Caesar <pantheon@rome.com> het volgende geschreven:
Small batch beard laboris tempor, non listicle hella Tumblr heirloom.
Small batch beard laboris tempor, non listicle hella Tumblr heirloom.
"""))
def test_vietnamese_from_block():
eq_('Hello', quotations.extract_from_plain(
u"""Hello
Vào 14:24 8 tháng 6, 2017, Hùng Nguyễn <hungnguyen@xxx.com> đã viết:
> Xin chào
"""))
def test_quotation_marker_false_positive():
msg_body = """Visit us now for assistance...
@@ -453,8 +332,7 @@ def test_link_closed_with_quotation_marker_on_new_line():
msg_body = '''8.45am-1pm
From: somebody@example.com
Date: Wed, 16 May 2012 00:15:02 -0600
<http://email.example.com/c/dHJhY2tpbmdfY29kZT1mMDdjYzBmNzM1ZjYzMGIxNT
> <bob@example.com <mailto:bob@example.com> >
@@ -495,9 +373,7 @@ def test_from_block_starts_with_date():
msg_body = """Blah
Date: Wed, 16 May 2012 00:15:02 -0600
To: klizhentas@example.com
"""
To: klizhentas@example.com"""
eq_('Blah', quotations.extract_from_plain(msg_body))
@@ -567,12 +443,11 @@ def test_mark_message_lines():
# next line should be marked as splitter
'_____________',
'From: foo@bar.com',
'Date: Wed, 16 May 2012 00:15:02 -0600',
'',
'> Hi',
'',
'Signature']
eq_('tesssemet', quotations.mark_message_lines(lines))
eq_('tessemet', quotations.mark_message_lines(lines))
lines = ['Just testing the email reply',
'',
@@ -735,15 +610,6 @@ def test_preprocess_postprocess_2_links():
eq_(msg_body, quotations.extract_from_plain(msg_body))
def body_iterator(msg, decode=False):
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, six.text_type):
yield payload
else:
yield payload.decode('utf8')
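Why this helper exists: on Python 3, get_payload(decode=True) yields bytes, so each part has to be decoded before the chunks are joined into the message text. Usage mirrors the test below (the fixture path is hypothetical):

.. code:: python

    import email
    import email.iterators

    with open('tests/fixtures/standard_replies/reply.eml') as f:  # hypothetical fixture
        message = email.message_from_file(f)
    body = next(email.iterators.typed_subpart_iterator(message, subtype='plain'))
    text = ''.join(body_iterator(body, True))  # always str, never bytes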
def test_standard_replies():
for filename in os.listdir(STANDARD_REPLIES):
filename = os.path.join(STANDARD_REPLIES, filename)
@@ -751,8 +617,8 @@ def test_standard_replies():
continue
with open(filename) as f:
message = email.message_from_file(f)
body = next(email.iterators.typed_subpart_iterator(message, subtype='plain'))
text = ''.join(body_iterator(body, True))
body = email.iterators.typed_subpart_iterator(message, subtype='plain').next()
text = ''.join(email.iterators.body_line_iterator(body, True))
stripped_text = quotations.extract_from_plain(text)
reply_text_fn = filename[:-4] + '_reply_text'
@@ -765,77 +631,3 @@ def test_standard_replies():
"'%(reply)s' != %(stripped)s for %(fn)s" % \
{'reply': reply_text, 'stripped': stripped_text,
'fn': filename}
def test_split_email():
msg = """From: Mr. X
Date: 24 February 2016
To: Mr. Y
Subject: Hi
Attachments: none
Goodbye.
From: Mr. Y
To: Mr. X
Date: 24 February 2016
Subject: Hi
Attachments: none
Hello.
On 24th February 2016 at 09.32am, Conal wrote:
Hey!
On Mon, 2016-10-03 at 09:45 -0600, Stangel, Dan wrote:
> Mohan,
>
> We have not yet migrated the systems.
>
> Dan
>
> > -----Original Message-----
> > Date: Mon, 2 Apr 2012 17:44:22 +0400
> > Subject: Test
> > From: bob@xxx.mailgun.org
> > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com
> >
> > Hi
> >
> > > From: bob@xxx.mailgun.org
> > > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com
> > > Date: Mon, 2 Apr 2012 17:44:22 +0400
> > > Subject: Test
> > > Hi
> > >
> >
>
>
"""
expected_markers = "stttttsttttetesetesmmmmmmsmmmmmmmmmmmmmmmm"
markers = quotations.split_emails(msg)
eq_(markers, expected_markers)
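The expected marker string encodes one character per line of the thread; read against the fixture, 's' marks a splitter (From:/Date: header) line, 't' plain text, 'e' an empty line inside a header block, and 'm' a quoted ('>') line (my reading of the tests, not a documented legend). Calling the function directly:

.. code:: python

    from talon import quotations

    msg = "Reply\n\nFrom: bob@example.com\nDate: Mon, 2 Apr 2012 17:44:22 +0400\n\n> quoted\n"
    print(quotations.split_emails(msg))  # one marker per line, over {'s', 't', 'e', 'm'}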
def test_feedback_below_left_unparsed():
msg_body = """Please enter your feedback below. Thank you.
------------------------------------- Enter Feedback Below -------------------------------------
The user experience was unparallelled. Please continue production. I'm sending payment to ensure
that this line is intact."""
parsed = quotations.extract_from_plain(msg_body)
eq_(msg_body, parsed)
def test_appointment_2():
msg_body = """Invitation for an interview:
Date: Wednesday 3, October 2011
Time: 7 : 00am
Address: 130 Fox St
Please bring in your ID."""
parsed = quotations.extract_from_plain(msg_body)
eq_(msg_body, parsed)


@@ -1,71 +1,9 @@
# coding:utf-8
from __future__ import absolute_import
from talon import utils as u
from . import *
from talon import utils
def test_get_delimiter():
eq_('\r\n', u.get_delimiter('abc\r\n123'))
eq_('\n', u.get_delimiter('abc\n123'))
eq_('\n', u.get_delimiter('abc'))
def test_html_to_text():
html = """<body>
<p>Hello world!</p>
<br>
<ul>
<li>One!</li>
<li>Two</li>
</ul>
<p>
Haha
</p>
</body>"""
text = u.html_to_text(html)
eq_("Hello world! \n\n * One! \n * Two \nHaha", text)
eq_(u"привет!", u.html_to_text("<b>привет!</b>"))
html = '<body><br/><br/>Hi</body>'
eq_('Hi', u.html_to_text(html))
html = """Hi
<style type="text/css">
div, p, li {
font: 13px 'Lucida Grande', Arial, sans-serif;
}
</style>
<style type="text/css">
h1 {
font: 13px 'Lucida Grande', Arial, sans-serif;
}
</style>"""
eq_('Hi', u.html_to_text(html))
html = """<div>
<!-- COMMENT 1 -->
<span>TEXT 1</span>
<p>TEXT 2 <!-- COMMENT 2 --></p>
</div>"""
eq_('TEXT 1 \nTEXT 2', u.html_to_text(html))
def test_comment_no_parent():
s = '<!-- COMMENT 1 --> no comment'
d = u.html_document_fromstring(s)
eq_("no comment", u.html_tree_to_text(d))
@patch.object(u, 'html_fromstring', Mock(return_value=None))
def test_bad_html_to_text():
bad_html = "one<br>two<br>three"
eq_(None, u.html_to_text(bad_html))
eq_('\r\n', utils.get_delimiter('abc\r\n123'))
eq_('\n', utils.get_delimiter('abc\n123'))
eq_('\n', utils.get_delimiter('abc'))
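Both spellings of the import (utils as u and plain utils) reach the same helpers; minimal usage for reference:

.. code:: python

    from talon import utils as u

    assert u.get_delimiter('abc\r\n123') == '\r\n'  # detects the line-ending convention
    print(u.html_to_text('<p>Hello world!</p>'))    # plain text, roughly 'Hello world!'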


@@ -1,4 +1,3 @@
from __future__ import absolute_import
from talon.signature import EXTRACTOR_FILENAME, EXTRACTOR_DATA
from talon.signature.learning.classifier import train, init