2023-02-10 04:12:07 +00:00
|
|
|
import tensorflow
|
2023-02-02 03:12:25 +00:00
|
|
|
import re
|
2023-02-02 03:12:56 +00:00
|
|
|
import html.entities
|
2023-02-02 03:17:43 +00:00
|
|
|
import typing as t
|
2023-02-03 22:27:44 +00:00
|
|
|
import nltk.sentiment.util
|
|
|
|
|
|
|
|
from .base import BaseTokenizer
|
2023-02-02 03:12:25 +00:00
|
|
|
|
2023-02-02 16:24:11 +00:00
|
|
|
|
2023-02-03 22:27:44 +00:00
|
|
|
class PottsTokenizer(BaseTokenizer):
    """
    Tokenizer based on `Christopher Potts' tokenizer <http://sentiment.christopherpotts.net/tokenizing.html>`_, released in 2011.

    This module is released under the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License: https://creativecommons.org/licenses/by-nc-sa/3.0/ .
    """

    # Matches Western-style emoticons in either orientation (":-)" or "(-:").
    # The "|" between the two halves is essential: without it the pattern would
    # require both orientations back-to-back and never match a plain ":)".
    # noinspection RegExpRepeatedSpace
    # language=pythonregexp
    emoticon_re_string = r"""
        [<>]?
        [:;=8]                # eyes
        [\-o*']?              # optional nose
        [)\](\[dDpP/:}{@|\\]  # mouth
        |
        [)\](\[dDpP/:}{@|\\]  # mouth
        [\-o*']?              # optional nose
        [:;=8]                # eyes
        [<>]?
    """

    # Compiled standalone so tokenize_plain can test single tokens for emoticon-ness.
    emoticon_re = re.compile(emoticon_re_string, re.VERBOSE | re.I)

    # Tuple of alternative sub-patterns, joined with "|" below.
    # Order matters: earlier alternatives win, so emoticons and phone numbers
    # are matched before generic words, and the catch-all \S comes last.
    # noinspection RegExpRepeatedSpace,RegExpUnnecessaryNonCapturingGroup
    # language=pythonregexp
    words_re_string = (
        # Emoticons:
        emoticon_re_string
        ,
        # Phone numbers:
        r"""
        (?:            # (international)
            \+?[01]
            [\-\s.]*
        )?
        (?:            # (area code)
            [(]?
            \d{3}
            [\-\s.)]*
        )?
        \d{3}          # exchange
        [\-\s.]*
        \d{4}          # base
        """
        ,
        # HTML tags:
        r"""<[^>]+>"""
        ,
        # Twitter username:
        r"""@[\w_]+"""
        ,
        # Twitter hashtags:
        r"""#+[\w_]+[\w'_\-]*[\w_]+"""
        ,
        # Words with apostrophes or dashes
        r"""[a-z][a-z'\-_]+[a-z]"""
        ,
        # Numbers, including fractions, decimals
        r"""[+\-]?\d+[,/.:-]\d+[+\-]?"""
        ,
        # Words without apostrophes or dashes
        r"""[\w_]+"""
        ,
        # Ellipsis dots
        r"""\.(?:\s*\.)+"""
        ,
        # Everything else that isn't whitespace
        r"""(?:\S)"""
    )

    # The master tokenization regex: one big alternation of all the above.
    words_re = re.compile("|".join(words_re_string), re.VERBOSE | re.I)

    # Matches numeric HTML character references such as "&#8217;".
    # language=pythonregexp
    digit_re_string = r"&#\d+;"

    digit_re = re.compile(digit_re_string, re.VERBOSE)

    # Matches named HTML character references such as "&amp;".
    # language=pythonregexp
    alpha_re_string = r"&\w+;"

    alpha_re = re.compile(alpha_re_string, re.VERBOSE)

    # Bare ampersand, replaced with " and " at the end of __html2string.
    # NOTE(review): the 2011 Potts original uses "&amp;" here, so it only
    # rewrites the entity, not every raw "&" — confirm this difference is intended.
    amp = "&"

    @classmethod
    def __html2string(cls, s: str) -> str:
        """
        Internal method that seeks to replace all the HTML entities in s with their corresponding characters.
        """
        # First the digits:
        ents = set(cls.digit_re.findall(s))
        if len(ents) > 0:
            for ent in ents:
                entnum = ent[2:-1]
                try:
                    entnum = int(entnum)
                    s = s.replace(ent, chr(entnum))
                except (ValueError, KeyError):
                    # Malformed or out-of-range reference: leave it untouched.
                    pass
        # Now the alpha versions:
        ents = set(cls.alpha_re.findall(s))
        ents = filter((lambda x: x != cls.amp), ents)
        for ent in ents:
            entname = ent[1:-1]
            try:
                s = s.replace(ent, chr(html.entities.name2codepoint[entname]))
            except (ValueError, KeyError):
                # Unknown entity name: leave it untouched.
                pass
        # Finally, spell out the remaining ampersands.
        s = s.replace(cls.amp, " and ")
        return s

    def tokenize_plain(self, text: str) -> t.Iterable[str]:
        """
        Tokenize text, returning the tokens re-joined by single spaces,
        lowercased except for emoticons.
        """
        # Fix HTML character entities
        s = self.__html2string(text)
        # Tokenize
        words = self.words_re.findall(s)
        # Possibly alter the case, but avoid changing emoticons like :D into :d:
        words = list(map(lambda x: x if self.emoticon_re.search(x) else x.lower(), words))
        # Re-join words
        result = " ".join(words)
        # Return the result
        return result
|
2023-02-03 22:27:44 +00:00
|
|
|
|
|
|
|
|
|
|
|
class PottsTokenizerWithNegation(PottsTokenizer):
    """
    Variant of :class:`PottsTokenizer` that appends ``_NEG`` to tokens inside
    negation scopes, via :func:`nltk.sentiment.util.mark_negation`.
    """

    def tokenize_plain(self, text: str) -> t.Iterable[str]:
        # The parent returns a single space-joined string, but mark_negation
        # requires a mutable list of tokens: on a plain str it silently marks
        # nothing (and would raise if it ever tried "doc[i] += '_NEG'" on an
        # immutable string), so split back into tokens first.
        words = super().tokenize_plain(text).split()
        # Mark tokens inside negation scopes in place.
        nltk.sentiment.util.mark_negation(words, shallow=True)
        # Re-join to keep the same return shape as the parent class.
        return " ".join(words)
|
2023-02-02 16:24:11 +00:00
|
|
|
|
|
|
|
|
|
|
|
# Public API of this module.
__all__ = (
    "PottsTokenizer",
    "PottsTokenizerWithNegation",
)
|