An adaptation of the Finetune Transformers Models with PyTorch Lightning tutorial for Habana Gaudi AI processors.
This notebook uses Hugging Face's datasets library to load the data, wrapped in a LightningDataModule, and then builds a class that can perform text classification on any dataset from the GLUE Benchmark. (Only CoLA and MRPC are shown here because of compute and disk constraints.)
Setup
This notebook requires some packages besides pytorch-lightning.
! pip install --quiet "datasets" "scikit-learn" "scipy" "transformers"
WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
[notice] A new release of pip available: 22.3 -> 22.3.1
[notice] To update, run: python3 -m pip install --upgrade pip
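Nothing Gaudi-specific is installed here; this assumes an environment like Habana's PyTorch Docker image, where the SynapseAI PyTorch build and a PyTorch Lightning version with HPU support are already present. An optional sanity check (an assumption about the environment, not a step from the original notebook):
# Optional check (environment assumption): the Habana PyTorch bridge must be importable
# for the Trainer(accelerator="hpu") calls later in this notebook.
import habana_frameworks.torch.core as htcore  # noqa: F401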
from datetime import datetime
from typing import Optional
import os
import datasets
import torch
from pytorch_lightning import LightningDataModule, LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from torch.utils.data import DataLoader
from transformers import (
    AdamW,
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    get_linear_schedule_with_warmup,
)
/usr/local/lib/python3.8/dist-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm
Training BERT with Lightning
Lightning DataModule for GLUE
class GLUEDataModule(LightningDataModule):

    task_text_field_map = {
        "cola": ["sentence"],
        "sst2": ["sentence"],
        "mrpc": ["sentence1", "sentence2"],
        "qqp": ["question1", "question2"],
        "stsb": ["sentence1", "sentence2"],
        "mnli": ["premise", "hypothesis"],
        "qnli": ["question", "sentence"],
        "rte": ["sentence1", "sentence2"],
        "wnli": ["sentence1", "sentence2"],
        "ax": ["premise", "hypothesis"],
    }

    glue_task_num_labels = {
        "cola": 2,
        "sst2": 2,
        "mrpc": 2,
        "qqp": 2,
        "stsb": 1,
        "mnli": 3,
        "qnli": 2,
        "rte": 2,
        "wnli": 2,
        "ax": 3,
    }

    loader_columns = [
        "datasets_idx",
        "input_ids",
        "token_type_ids",
        "attention_mask",
        "start_positions",
        "end_positions",
        "labels",
    ]

    def __init__(
        self,
        model_name_or_path: str,
        task_name: str = "mrpc",
        max_seq_length: int = 128,
        train_batch_size: int = 32,
        eval_batch_size: int = 32,
        **kwargs,
    ):
        super().__init__()
        self.model_name_or_path = model_name_or_path
        self.task_name = task_name
        self.max_seq_length = max_seq_length
        self.train_batch_size = train_batch_size
        self.eval_batch_size = eval_batch_size

        self.text_fields = self.task_text_field_map[task_name]
        self.num_labels = self.glue_task_num_labels[task_name]
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True)

    def setup(self, stage: str):
        self.dataset = datasets.load_dataset("glue", self.task_name)

        for split in self.dataset.keys():
            self.dataset[split] = self.dataset[split].map(
                self.convert_to_features,
                batched=True,
                remove_columns=["label"],
            )
            # Keep only the columns the model's forward() accepts
            self.columns = [c for c in self.dataset[split].column_names if c in self.loader_columns]
            self.dataset[split].set_format(type="torch", columns=self.columns)

        self.eval_splits = [x for x in self.dataset.keys() if "validation" in x]

    def prepare_data(self):
        # Download only; tokenization happens in setup()
        datasets.load_dataset("glue", self.task_name)
        AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True)

    def train_dataloader(self):
        return DataLoader(self.dataset["train"], batch_size=self.train_batch_size)

    def val_dataloader(self):
        if len(self.eval_splits) == 1:
            return DataLoader(self.dataset["validation"], batch_size=self.eval_batch_size)
        elif len(self.eval_splits) > 1:
            return [DataLoader(self.dataset[x], batch_size=self.eval_batch_size) for x in self.eval_splits]

    def test_dataloader(self):
        if len(self.eval_splits) == 1:
            return DataLoader(self.dataset["test"], batch_size=self.eval_batch_size)
        elif len(self.eval_splits) > 1:
            return [DataLoader(self.dataset[x], batch_size=self.eval_batch_size) for x in self.eval_splits]

    def convert_to_features(self, example_batch, indices=None):
        # Either encode single sentence or sentence pairs
        if len(self.text_fields) > 1:
            texts_or_text_pairs = list(zip(example_batch[self.text_fields[0]], example_batch[self.text_fields[1]]))
        else:
            texts_or_text_pairs = example_batch[self.text_fields[0]]

        # Tokenize the text/text pairs
        features = self.tokenizer.batch_encode_plus(
            texts_or_text_pairs, max_length=self.max_seq_length, pad_to_max_length=True, truncation=True
        )

        # Rename label to labels to make it easier to pass to model forward
        features["labels"] = example_batch["label"]

        return features
Prepare the data using the datamodule
dm = GLUEDataModule("distilbert-base-uncased")
dm.prepare_data()
dm.setup("fit")
next(iter(dm.train_dataloader()))
Downloading: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28.0/28.0 [00:00<00:00, 29.6kB/s]
Downloading: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 483/483 [00:00<00:00, 330kB/s]
Downloading: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 232k/232k [00:00<00:00, 731kB/s]
Downloading: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 466k/466k [00:00<00:00, 1.22MB/s]
Downloading builder script: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████| 28.8k/28.8k [00:00<00:00, 477kB/s]
Downloading metadata: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 28.7k/28.7k [00:00<00:00, 476kB/s]
Downloading readme: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 27.8k/27.8k [00:00<00:00, 445kB/s]
Downloading and preparing dataset glue/mrpc to /root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad...
Downloading data files: 0%| | 0/3 [00:00, ?it/s]
Downloading data: 6.22kB [00:00, 2.48MB/s]
Downloading data files: 33%|███████████████████████████████████████ | 1/3 [00:00<00:00, 2.86it/s]
Downloading data: 1.05MB [00:00, 17.0MB/s]
Downloading data files: 67%|██████████████████████████████████████████████████████████████████████████████ | 2/3 [00:00<00:00, 2.97it/s]
Downloading data: 441kB [00:00, 9.76MB/s]
Downloading data files: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 3.17it/s]
Dataset glue downloaded and prepared to /root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad. Subsequent calls will reuse this data.
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 960.09it/s]
Found cached dataset glue (/root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 1071.43it/s]
0%| | 0/4 [00:00, ?ba/s]/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_base.py:2304: FutureWarning: The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).
warnings.warn(
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 12.76ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 46.18ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 23.85ba/s]
{'input_ids': tensor([[ 101, 2572, 3217, ..., 0, 0, 0],
[ 101, 9805, 3540, ..., 0, 0, 0],
[ 101, 2027, 2018, ..., 0, 0, 0],
...,
[ 101, 1996, 2922, ..., 0, 0, 0],
[ 101, 6202, 1999, ..., 0, 0, 0],
[ 101, 16565, 2566, ..., 0, 0, 0]]),
'attention_mask': tensor([[1, 1, 1, ..., 0, 0, 0],
[1, 1, 1, ..., 0, 0, 0],
[1, 1, 1, ..., 0, 0, 0],
...,
[1, 1, 1, ..., 0, 0, 0],
[1, 1, 1, ..., 0, 0, 0],
[1, 1, 1, ..., 0, 0, 0]]),
'labels': tensor([1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1,
1, 1, 0, 0, 1, 1, 1, 0])}
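To sanity-check what the datamodule produced, the token IDs can be mapped back to text. A minimal sketch, not in the original notebook, reusing the dm defined above:
# Decode the first two examples of a batch back to text (special tokens stripped).
batch = next(iter(dm.train_dataloader()))
print(dm.tokenizer.batch_decode(batch["input_ids"][:2], skip_special_tokens=True))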
Transformer LightningModule
class GLUETransformer(LightningModule):
    def __init__(
        self,
        model_name_or_path: str,
        num_labels: int,
        task_name: str,
        learning_rate: float = 2e-5,
        adam_epsilon: float = 1e-8,
        warmup_steps: int = 0,
        weight_decay: float = 0.0,
        train_batch_size: int = 32,
        eval_batch_size: int = 32,
        eval_splits: Optional[list] = None,
        **kwargs,
    ):
        super().__init__()

        self.save_hyperparameters()

        self.config = AutoConfig.from_pretrained(model_name_or_path, num_labels=num_labels)
        self.model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, config=self.config)
        self.metric = datasets.load_metric(
            "glue", self.hparams.task_name, experiment_id=datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
        )

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        outputs = self(**batch)
        loss = outputs[0]
        return loss

    def validation_step(self, batch, batch_idx, dataloader_idx=0):
        outputs = self(**batch)
        val_loss, logits = outputs[:2]

        if self.hparams.num_labels > 1:
            preds = torch.argmax(logits, axis=1)
        elif self.hparams.num_labels == 1:
            # stsb is a regression task, so the prediction is the raw score
            preds = logits.squeeze()

        labels = batch["labels"]

        return {"loss": val_loss, "preds": preds, "labels": labels}

    def validation_epoch_end(self, outputs):
        if self.hparams.task_name == "mnli":
            for i, output in enumerate(outputs):
                # matched or mismatched
                split = self.hparams.eval_splits[i].split("_")[-1]
                preds = torch.cat([x["preds"] for x in output]).detach().cpu().numpy()
                labels = torch.cat([x["labels"] for x in output]).detach().cpu().numpy()
                loss = torch.stack([x["loss"] for x in output]).mean()
                self.log(f"val_loss_{split}", loss, prog_bar=True)
                split_metrics = {
                    f"{k}_{split}": v for k, v in self.metric.compute(predictions=preds, references=labels).items()
                }
                self.log_dict(split_metrics, prog_bar=True)
            return loss

        preds = torch.cat([x["preds"] for x in outputs]).detach().cpu().numpy()
        labels = torch.cat([x["labels"] for x in outputs]).detach().cpu().numpy()
        loss = torch.stack([x["loss"] for x in outputs]).mean()
        self.log("val_loss", loss, prog_bar=True)
        self.log_dict(self.metric.compute(predictions=preds, references=labels), prog_bar=True)
        return loss

    def setup(self, stage=None) -> None:
        if stage != "fit":
            return
        # Get dataloader by calling it - train_dataloader() is called after setup() by default
        train_loader = self.trainer.datamodule.train_dataloader()

        # Calculate total steps
        tb_size = self.hparams.train_batch_size
        ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
        self.total_steps = (len(train_loader.dataset) // tb_size) // ab_size

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon)

        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.hparams.warmup_steps,
            num_training_steps=self.total_steps,
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return [optimizer], [scheduler]
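The per-task metric loaded in __init__ is what produces the matthews_correlation, accuracy, and f1 values shown in the training logs below. A small standalone sketch of its behaviour, using dummy predictions (not part of the original notebook; load_metric also emits the deprecation warning you will see later in the logs):
# Dummy example: MRPC reports accuracy and F1, CoLA reports Matthews correlation.
metric = datasets.load_metric("glue", "mrpc")
print(metric.compute(predictions=[1, 0, 1, 1], references=[1, 0, 0, 1]))
# -> {'accuracy': 0.75, 'f1': 0.8}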
Training
CoLA
See an interactive view of the CoLA dataset in NLP Viewer
seed_everything(42)

dm = GLUEDataModule(model_name_or_path="albert-base-v2", task_name="cola")
dm.setup("fit")
model = GLUETransformer(
    model_name_or_path="albert-base-v2",
    num_labels=dm.num_labels,
    eval_splits=dm.eval_splits,
    task_name=dm.task_name,
)

trainer = Trainer(
    max_epochs=1,
    accelerator="hpu",
    devices=1,
    callbacks=[TQDMProgressBar(refresh_rate=20)],
)
trainer.fit(model, datamodule=dm)
Global seed set to 42
Downloading: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 684/684 [00:00<00:00, 786kB/s]
Downloading: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 760k/760k [00:00<00:00, 1.70MB/s]
Downloading: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1.31M/1.31M [00:00<00:00, 2.67MB/s]
Downloading and preparing dataset glue/cola to /root/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad...
Downloading data: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 377k/377k [00:00<00:00, 6.98MB/s]
Dataset glue downloaded and prepared to /root/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad. Subsequent calls will reuse this data.
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 1093.98it/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9/9 [00:00<00:00, 17.75ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 30.14ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 29.89ba/s]
Downloading: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 47.4M/47.4M [00:01<00:00, 42.2MB/s]
Some weights of the model checkpoint at albert-base-v2 were not used when initializing AlbertForSequenceClassification: ['predictions.bias', 'predictions.decoder.bias', 'predictions.dense.bias', 'predictions.dense.weight', 'predictions.decoder.weight', 'predictions.LayerNorm.weight', 'predictions.LayerNorm.bias']
- This IS expected if you are initializing AlbertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing AlbertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of AlbertForSequenceClassification were not initialized from the model checkpoint at albert-base-v2 and are newly initialized: ['classifier.weight', 'classifier.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
/tmp/ipykernel_120/1213150618.py:22: FutureWarning: load_metric is deprecated and will be removed in the next major version of datasets. Use 'evaluate.load' instead, from the new library 🤗 Evaluate: https://huggingface.co/docs/evaluate
self.metric = datasets.load_metric(
Downloading builder script: 5.76kB [00:00, 2.55MB/s]
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: True, using: 1 HPUs
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
Found cached dataset glue (/root/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 890.38it/s]
Missing logger folder: /root/gaudi-tutorials/Lightning/Finetune Transformers/lightning_logs
Found cached dataset glue (/root/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 1007.04it/s]
0%| | 0/9 [00:00, ?ba/s]/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_base.py:2304: FutureWarning: The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).
warnings.warn(
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9/9 [00:00<00:00, 16.32ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 28.46ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 29.16ba/s]
=============================HABANA PT BRIDGE CONFIGURATION ===========================
PT_HPU_LAZY_MODE = 1
PT_HPU_LAZY_EAGER_OPTIM_CACHE = 1
PT_HPU_ENABLE_COMPILE_THREAD = 0
PT_HPU_ENABLE_EXECUTION_THREAD = 1
PT_HPU_ENABLE_LAZY_EAGER_EXECUTION_THREAD = 1
PT_ENABLE_INTER_HOST_CACHING = 0
PT_ENABLE_INFERENCE_MODE = 1
PT_ENABLE_HABANA_CACHING = 1
PT_HPU_MAX_RECIPE_SUBMISSION_LIMIT = 0
PT_HPU_MAX_COMPOUND_OP_SIZE = 9223372036854775807
PT_HPU_MAX_COMPOUND_OP_SIZE_SS = 10
PT_HPU_ENABLE_STAGE_SUBMISSION = 1
PT_HPU_PGM_ENABLE_CACHE = 1
PT_HPU_ENABLE_LAZY_COLLECTIVES = 0
PT_HCCL_SLICE_SIZE_MB = 16
PT_HCCL_MEMORY_ALLOWANCE_MB = 384
PT_HPU_INITIAL_WORKSPACE_SIZE = 0
PT_HABANA_POOL_SIZE = 24
PT_HPU_POOL_STRATEGY = 5
PT_HPU_POOL_LOG_FRAGMENTATION_INFO = 0
PT_ENABLE_MEMORY_DEFRAGMENTATION = 1
PT_ENABLE_DEFRAGMENTATION_INFO = 0
PT_HPU_ENABLE_SYNAPSE_LAYOUT_HANDLING = 1
PT_HPU_ENABLE_SYNAPSE_OUTPUT_PERMUTE = 1
PT_HPU_ENABLE_VALID_DATA_RANGE_CHECK = 1
PT_HPU_FORCE_USE_DEFAULT_STREAM = 0
PT_RECIPE_CACHE_PATH =
PT_HPU_ENABLE_REFINE_DYNAMIC_SHAPES = 0
PT_HPU_DYNAMIC_MIN_POLICY_ORDER = 3,1
PT_HPU_DYNAMIC_MAX_POLICY_ORDER = 2,3,1
=============================SYSTEM CONFIGURATION =========================================
Num CPU Cores = 96
CPU RAM = 784300912 KB
============================================================================================
/usr/local/lib/python3.8/dist-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning
warnings.warn(
| Name | Type | Params
----------------------------------------------------------
0 | model | AlbertForSequenceClassification | 11.7 M
----------------------------------------------------------
11.7 M Trainable params
0 Non-trainable params
11.7 M Total params
46.740 Total estimated model params size (MB)
Sanity Checking DataLoader 0: 0%| | 0/2 [00:00, ?it/s]
/usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/data_connector.py:236: PossibleUserWarning: The dataloader, val_dataloader 0, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 96 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
rank_zero_warn(
/usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/data_connector.py:236: PossibleUserWarning: The dataloader, train_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 96 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
rank_zero_warn(
Epoch 0: 86%|████████████████████████████████████████████████████████████████████████████████████████████▍ | 260/301 [03:52<00:36, 1.12it/s, loss=0.491, v_num=0]
Validation: 0it [00:00, ?it/s]
Validation: 0%| | 0/33 [00:00, ?it/s]
Epoch 0: 93%|███████████████████████████████████████████████████████████████████████████████████████████████████▌ | 280/301 [03:59<00:17, 1.17it/s, loss=0.491, v_num=0]
Epoch 0: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████▋| 300/301 [04:00<00:00, 1.25it/s, loss=0.491, v_num=0]
Epoch 0: 100%|███████████████████████████████████████████████████████████████| 301/301 [04:02<00:00, 1.24it/s, loss=0.491, v_num=0, val_loss=0.512, matthews_correlation=0.404]
Epoch 0: 100%|███████████████████████████████████████████████████████████████| 301/301 [04:02<00:00, 1.24it/s, loss=0.491, v_num=0, val_loss=0.512, matthews_correlation=0.404]
`Trainer.fit` stopped: `max_epochs=1` reached.
Epoch 0: 100%|███████████████████████████████████████████████████████████████| 301/301 [04:03<00:00, 1.23it/s, loss=0.491, v_num=0, val_loss=0.512, matthews_correlation=0.404]
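At this point model holds the CoLA-fine-tuned ALBERT weights. A hedged sketch of running a single prediction with it (not part of the original notebook; the explicit .cpu() is an assumption to keep the example device-agnostic):
# Classify one sentence with the fine-tuned model: for CoLA, index 1 = "acceptable".
model = model.cpu().eval()
encoded = dm.tokenizer("The boy quickly ran home.", return_tensors="pt")
with torch.no_grad():
    logits = model(**encoded).logits
print(logits.softmax(dim=-1))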
MRPC
See an interactive view of the MRPC dataset in NLP Viewer
seed_everything(42)

dm = GLUEDataModule(
    model_name_or_path="distilbert-base-cased",
    task_name="mrpc",
)
dm.setup("fit")
model = GLUETransformer(
    model_name_or_path="distilbert-base-cased",
    num_labels=dm.num_labels,
    eval_splits=dm.eval_splits,
    task_name=dm.task_name,
)

trainer = Trainer(
    max_epochs=3,
    accelerator="hpu",
    devices=1,
    callbacks=[TQDMProgressBar(refresh_rate=20)],
)
trainer.fit(model, datamodule=dm)
Global seed set to 42
Found cached dataset glue (/root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 953.18it/s]
Loading cached processed dataset at /root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad/cache-206454e8a1601057.arrow
0%| | 0/1 [00:00, ?ba/s]/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_base.py:2304: FutureWarning: The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).
warnings.warn(
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 32.41ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 20.61ba/s]
Some weights of the model checkpoint at distilbert-base-cased were not used when initializing DistilBertForSequenceClassification: ['vocab_layer_norm.weight', 'vocab_transform.weight', 'vocab_layer_norm.bias', 'vocab_transform.bias', 'vocab_projector.bias', 'vocab_projector.weight']
- This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-cased and are newly initialized: ['pre_classifier.weight', 'classifier.weight', 'pre_classifier.bias', 'classifier.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: True, using: 1 HPUs
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
Found cached dataset glue (/root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 990.70it/s]
Found cached dataset glue (/root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 1206.07it/s]
0%| | 0/4 [00:00, ?ba/s]/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_base.py:2304: FutureWarning: The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).
warnings.warn(
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 18.20ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 33.07ba/s]
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 2/2 [00:00<00:00, 18.59ba/s]
/usr/local/lib/python3.8/dist-packages/transformers/optimization.py:306: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning
warnings.warn(
| Name | Type | Params
--------------------------------------------------------------
0 | model | DistilBertForSequenceClassification | 65.8 M
--------------------------------------------------------------
65.8 M Trainable params
0 Non-trainable params
65.8 M Total params
263.132 Total estimated model params size (MB)
/usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/data_connector.py:236: PossibleUserWarning: The dataloader, val_dataloader 0, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 96 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
rank_zero_warn(
/usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/data_connector.py:236: PossibleUserWarning: The dataloader, train_dataloader, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 96 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
rank_zero_warn(
Epoch 0: 78%|████████████████████████████████████████████████████████████████████████████████████▍ | 100/128 [00:09<00:02, 10.78it/s, loss=0.59, v_num=2]
Validation: 0it [00:00, ?it/s]
Validation: 0%| | 0/13 [00:00, ?it/s]
Epoch 0: 94%|█████████████████████████████████████████████████████████████████████████████████████████████████████▎ | 120/128 [00:09<00:00, 12.04it/s, loss=0.59, v_num=2]
Epoch 0: 100%|█████████████████████████████████████████████████████████████████| 128/128 [00:11<00:00, 11.58it/s, loss=0.612, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
Epoch 1: 78%|███████████████████████████████████████████████████▌ | 100/128 [00:09<00:02, 10.78it/s, loss=0.59, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
Validation: 0it [00:00, ?it/s]
Validation: 0%| | 0/13 [00:00, ?it/s]
Epoch 1: 94%|█████████████████████████████████████████████████████████████▉ | 120/128 [00:09<00:00, 12.05it/s, loss=0.59, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
Epoch 1: 100%|█████████████████████████████████████████████████████████████████| 128/128 [00:11<00:00, 11.59it/s, loss=0.611, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
Epoch 2: 78%|██████████████████████████████████████████████████▊ | 100/128 [00:09<00:02, 10.78it/s, loss=0.589, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
Validation: 0it [00:00, ?it/s]
Validation: 0%| | 0/13 [00:00, ?it/s]
Epoch 2: 94%|████████████████████████████████████████████████████████████▉ | 120/128 [00:09<00:00, 12.05it/s, loss=0.589, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
Epoch 2: 100%|██████████████████████████████████████████████████████████████████| 128/128 [00:11<00:00, 11.59it/s, loss=0.61, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
Epoch 2: 100%|██████████████████████████████████████████████████████████████████| 128/128 [00:11<00:00, 11.59it/s, loss=0.61, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
`Trainer.fit` stopped: `max_epochs=3` reached.
Epoch 2: 100%|██████████████████████████████████████████████████████████████████| 128/128 [00:13<00:00, 9.64it/s, loss=0.61, v_num=2, val_loss=0.613, accuracy=0.684, f1=0.812]
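To reuse the fine-tuned MRPC model later, the usual Lightning checkpoint round-trip applies. A minimal sketch; the file name is hypothetical and this step is not in the original notebook:
# Save everything needed to restore the LightningModule (weights + hyperparameters).
trainer.save_checkpoint("distilbert-mrpc.ckpt")
# Later / elsewhere: rebuild the module from the checkpoint.
restored = GLUETransformer.load_from_checkpoint("distilbert-mrpc.ckpt")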
MNLI
The MNLI dataset is huge, so we aren't going to train on it here; we skip training and go straight to validation.
See an interactive view of the MNLI dataset in NLP Viewer
dm = GLUEDataModule(
    model_name_or_path="distilbert-base-cased",
    task_name="mnli",
)
dm.setup("fit")
model = GLUETransformer(
    model_name_or_path="distilbert-base-cased",
    num_labels=dm.num_labels,
    eval_splits=dm.eval_splits,
    task_name=dm.task_name,
)

trainer = Trainer(accelerator="hpu", devices=1)
trainer.validate(model, dm.val_dataloader())

trainer.logged_metrics
Downloading and preparing dataset glue/mnli to /root/.cache/huggingface/datasets/glue/mnli/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad...
Downloading data: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 313M/313M [00:06<00:00, 50.2MB/s]
Dataset glue downloaded and prepared to /root/.cache/huggingface/datasets/glue/mnli/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad. Subsequent calls will reuse this data.
100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5/5 [00:00<00:00, 455.76it/s]
0%| | 0/393 [00:00, ?ba/s]/usr/local/lib/python3.8/dist-packages/transformers/tokenization_utils_base.py:2304: FutureWarning: The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).
warnings.warn(
100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 393/393 [00:22<00:00, 17.15ba/s]
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10/10 [00:00<00:00, 19.15ba/s]
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10/10 [00:00<00:00, 19.06ba/s]
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10/10 [00:00<00:00, 16.44ba/s]
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 10/10 [00:00<00:00, 18.97ba/s]
Some weights of the model checkpoint at distilbert-base-cased were not used when initializing DistilBertForSequenceClassification: ['vocab_layer_norm.weight', 'vocab_transform.weight', 'vocab_layer_norm.bias', 'vocab_transform.bias', 'vocab_projector.bias', 'vocab_projector.weight']
- This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-cased and are newly initialized: ['pre_classifier.weight', 'classifier.weight', 'pre_classifier.bias', 'classifier.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
GPU available: False, used: False
TPU available: False, using: 0 TPU cores
IPU available: False, using: 0 IPUs
HPU available: True, using: 1 HPUs
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...
To disable this warning, you can either:
- Avoid using `tokenizers` before the fork if possible
- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)
/usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/data_connector.py:236: PossibleUserWarning: The dataloader, val_dataloader 0, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 96 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
rank_zero_warn(
/usr/local/lib/python3.8/dist-packages/pytorch_lightning/trainer/connectors/data_connector.py:236: PossibleUserWarning: The dataloader, val_dataloader 1, does not have many workers which may be a bottleneck. Consider increasing the value of the `num_workers` argument` (try 96 which is the number of cpus on this machine) in the `DataLoader` init to improve performance.
rank_zero_warn(
Validation DataLoader 1: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 308/308 [00:14<00:00, 21.42it/s]
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
Validate metric DataLoader 0 DataLoader 1
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
accuracy_matched 0.32735610008239746 0.32735610008239746
accuracy_mismatched 0.32953619956970215 0.32953619956970215
val_loss_matched 1.103335976600647 1.103335976600647
val_loss_mismatched 1.1029515266418457 1.1029515266418457
────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
{'val_loss_matched': tensor(1.1033),
'accuracy_matched': tensor(0.3274),
'val_loss_mismatched': tensor(1.1030),
'accuracy_mismatched': tensor(0.3295)}
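If you do want to fine-tune on MNLI without touching all ~393k training examples, the Trainer's own limiting flags are enough. A hedged sketch under the same setup as above (not run in this notebook):
# Train on a 10% slice of MNLI and validate four times per (limited) epoch.
trainer = Trainer(
    max_epochs=1,
    accelerator="hpu",
    devices=1,
    limit_train_batches=0.1,
    val_check_interval=0.25,
)
# trainer.fit(model, datamodule=dm)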
Copyright (c) 2022 Habana Labs, Ltd. an Intel Company.
All rights reserved.
License
Licensed under a CC BY SA 4.0 license.
A derivative of Finetune Transformers Models with PyTorch Lightning by the PL Team