1
Fork 0
mirror of https://github.com/Steffo99/unimore-bda-6.git synced 2024-11-23 00:14:19 +00:00
bda-6-steffo/unimore_bda_6/tokenizer/plain.py

17 lines
442 B
Python
Raw Normal View History

2023-02-08 18:46:05 +00:00
import tensorflow
from .base import BaseTokenizer
class PlainTokenizer(BaseTokenizer):
    """
    Tokenizer that performs no processing beyond splitting the input text on whitespace.
    """

    def tokenize_plain(self, text: str) -> list[str]:
        # str.split() with no arguments splits on any run of whitespace
        # and never produces empty tokens.
        tokens = text.split()
        return tokens

    def tokenize_tensorflow(self, text: tensorflow.Tensor) -> tensorflow.Tensor:
        # Add a trailing axis to the string tensor, labelled "tokens".
        # NOTE(review): no actual splitting happens here — presumably a
        # downstream TensorFlow layer performs the whitespace split;
        # confirm against callers.
        expanded = tensorflow.expand_dims(text, -1, name="tokens")
        return expanded