# coding: utf-8
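# Tests for the Urdu tokenizer. The `ur_tokenizer` fixture is assumed to be
# provided by the surrounding test suite (in spaCy it lives in conftest.py).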
from __future__ import unicode_literals

import pytest


def test_ur_tokenizer_handles_long_text(ur_tokenizer):
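    # A full Urdu sentence; in English, roughly:
    # "Actually, we have become somewhat used to being humiliated."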
text = """اصل میں، رسوا ہونے کی ہمیں کچھ عادت سی ہو گئی ہے۔"""
    tokens = ur_tokenizer(text)
    assert len(tokens) == 14


@pytest.mark.parametrize("text,length", [("تحریر باسط حبیب", 3), ("میرا پاکستان", 2)])
def test_ur_tokenizer_handles_cnts(ur_tokenizer, text, length):
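    # Cases: "تحریر باسط حبیب" (roughly "written by Basit Habib", 3 tokens)
    # and "میرا پاکستان" ("my Pakistan", 2 tokens).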
    tokens = ur_tokenizer(text)
    assert len(tokens) == length