# Fork 0
# mirror of https://github.com/Steffo99/unimore-bda-6.git synced 2024-11-25 17:24:20 +00:00
# bda-6-steffo/unimore_bda_6/tokenizer/lower.py
# 2023-02-08 19:46:05 +01:00
#
# 17 lines, 498 B, Python

import tensorflow
from .base import BaseTokenizer
class LowercaseTokenizer(BaseTokenizer):
    """
    Tokenizer which converts the words to lowercase before splitting them via spaces.
    """

    def tokenize_plain(self, text: str) -> list[str]:
        # Lowercase first, then split on runs of whitespace.
        lowered = text.lower()
        return lowered.split()

    def tokenize_tensorflow(self, text: tensorflow.Tensor) -> tensorflow.Tensor:
        # Lowercase the string tensor, then add a trailing axis named "tokens".
        # NOTE(review): unlike tokenize_plain, no splitting happens here —
        # presumably a downstream TF layer performs it; confirm against callers.
        lowered = tensorflow.strings.lower(text)
        return tensorflow.expand_dims(lowered, -1, name="tokens")