Roberta model with a streamed dataset (Custom Single Head)
This notebook contains an example of how to train a Roberta-based model with a streamed dataset.
In this series, we walk through some of the capabilities of this library: single-head classification, multi-head classification, multi-label classification, and regression. If you want a more detailed tutorial, check this out.
import os
# This will specify a GPU (or a list of GPUs) for training
os.environ['CUDA_VISIBLE_DEVICES'] = "0"

from that_nlp_library.text_transformation import *
from that_nlp_library.text_augmentation import *
from that_nlp_library.text_main_streaming import *
from that_nlp_library.utils import seed_everything

from underthesea import text_normalize
from functools import partial
from pathlib import Path
import pandas as pd
import numpy as np
import nlpaug.augmenter.char as nac
from datasets import load_dataset
import random
from transformers import RobertaTokenizer
from datasets import Dataset

Define the custom augmentation function
# Augment each input with probability p; works on a single string or a batch (list)
def nlp_aug_stochastic(x,aug=None,p=0.5):
    if not isinstance(x,list):
        if random.random()<p: return aug.augment(x)[0]
        return x
    news=[]
    originals=[]
    for _x in x:
        if random.random()<p: news.append(_x)
        else: originals.append(_x)
    # only perform augmentation when needed
    if len(news): news = aug.augment(news)
    return news+originals

aug = nac.KeyboardAug(aug_char_max=3,aug_char_p=0.1,aug_word_p=0.07)
nearby_aug_func = partial(nlp_aug_stochastic,aug=aug,p=0.3)
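Before wiring the augmenter into the data controller, we can sanity-check it on a single string and on a small batch. This is an illustrative check with made-up review snippets; it assumes seed_everything (imported above) seeds Python's random module so the result is reproducible. Note that in batch mode the augmented items come back first, so output order differs from input order.

seed_everything(42)
# single string: returned typo-noised with probability p, else unchanged
print(nearby_aug_func('this dress fits perfectly and the fabric is so soft'))
# batch: augmented items are returned before the untouched ones
print(nearby_aug_func(['great quality','runs a bit small','the color is gorgeous']))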
Create a TextDataController object

We will reuse the data and the preprocessing steps from this tutorial.
dset = load_dataset('sample_data',data_files=['Womens_Clothing_Reviews.csv'],split='train')
ddict_with_val = dset.train_test_split(test_size=0.1,seed=42)
ddict_with_val['validation'] = ddict_with_val['test']
ddict_with_val['train'] = ddict_with_val['train'].to_iterable_dataset()
del ddict_with_val['test']

ddict_with_val
DatasetDict({
train: IterableDataset({
features: ['Clothing ID', 'Age', 'Title', 'Review Text', 'Rating', 'Recommended IND', 'Positive Feedback Count', 'Division Name', 'Department Name', 'Class Name'],
n_shards: 1
})
validation: Dataset({
features: ['Clothing ID', 'Age', 'Title', 'Review Text', 'Rating', 'Recommended IND', 'Positive Feedback Count', 'Division Name', 'Department Name', 'Class Name'],
num_rows: 2349
})
})
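Since the train split is now an IterableDataset, rows are only materialized when you iterate over them. A quick, illustrative way to confirm the stream yields raw rows is to pull a single example:

# pull one raw row from the streamed train split
sample = next(iter(ddict_with_val['train']))
print({k:sample[k] for k in ('Review Text','Department Name','Division Name')})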
tdc = TextDataControllerStreaming(ddict_with_val,
main_text='Review Text',
label_names='Department Name',
sup_types='classification',
class_names_predefined=['Bottoms', 'Dresses', 'Intimate', 'Jackets', 'Tops', 'Trending'],
filter_dict={'Review Text': lambda x: x is not None,
'Department Name': lambda x: x is not None,
},
label_tfm_dict={'Department Name': lambda x: x if x!='Trend' else 'Trending'},
metadatas=['Title','Division Name'],
content_transformations=[text_normalize,str.lower],
content_augmentations=[nearby_aug_func,str.lower],
process_metas=True,
batch_size=1000,
num_proc=4,
seed=42,
verbose=False
)
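To make two of the arguments concrete: filter_dict drops any row whose review text or department label is missing, and label_tfm_dict renames the raw 'Trend' label to 'Trending' so it matches class_names_predefined. Here is a small standalone sketch of the same logic (keep_row is a hypothetical stand-in for how the two filters combine):

# label transform: only 'Trend' is renamed, everything else passes through
label_tfm = lambda x: x if x!='Trend' else 'Trending'
assert label_tfm('Trend')=='Trending'
assert label_tfm('Tops')=='Tops'

# hypothetical combined filter: both fields must be non-null for a row to survive
keep_row = lambda row: row['Review Text'] is not None and row['Department Name'] is not None
assert keep_row({'Review Text':'nice top','Department Name':'Tops'})
assert not keep_row({'Review Text':None,'Department Name':'Tops'})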
Define our tokenizer for Roberta

_tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
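Before handing the tokenizer to the controller, it is worth seeing what it produces for a short (made-up) review; this is just an illustrative check using the standard Hugging Face tokenizer call:

# encode one short review the same way the controller will (truncating at 150 tokens)
enc = _tokenizer('this dress fits perfectly',max_length=150,truncation=True)
print(enc['input_ids'])
print(_tokenizer.convert_ids_to_tokens(enc['input_ids']))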
Process and tokenize our dataset
tdc.process_and_tokenize(_tokenizer,max_length=150)

tdc.main_ddict
DatasetDict({
train: IterableDataset({
features: Unknown,
n_shards: 4
})
validation: Dataset({
features: ['Title', 'Review Text', 'Division Name', 'Department Name', 'label', 'input_ids', 'attention_mask'],
num_rows: 2253
})
})
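The train split's features show as Unknown because the stream is lazy; the processed columns only appear once you iterate. Here is an illustrative peek at one processed example, assuming the stream yields dicts with the same fields as the validation split:

# pull one tokenized example from the streamed train split
tok_sample = next(iter(tdc.main_ddict['train']))
print(tok_sample.keys())
print(tok_sample['label'],tok_sample['input_ids'][:10])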