Week N5: Getting Started with PyTorch Text Classification
- 🍨 This article is a learning-record blog for the 🔗 365-Day Deep Learning Training Camp
- 🍖 Original author: K同学啊
This week's tasks:
- Understand the basic workflow of text classification
- Learn common data-cleaning methods
- Learn how to tokenize text (jieba is the standard tool for Chinese; this week's English data uses torchtext's basic_english tokenizer)
- Learn how to build text vectors
Preliminaries
Load the data
import torch
import torch.nn as nn
import warnings

warnings.filterwarnings('ignore')  # silence non-critical warnings
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
Load the AG News dataset
from torchtext.datasets import AG_NEWS

train_iter = AG_NEWS(split='train')  # iterable-style dataset of (label, text) pairs
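Each element the iterator yields is a (label, text) pair, where the label is an integer from 1 to 4 (World, Sports, Business, Sci/Tech). A minimal sketch to peek at one sample without consuming train_iter (assumes the dataset download succeeds):

# Create a fresh iterator so train_iter itself is left untouched
sample_label, sample_text = next(iter(AG_NEWS(split='train')))
print(sample_label)       # an integer class label in 1-4
print(sample_text[:80])   # the first 80 characters of the news text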
Build the vocabulary
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator

tokenizer = get_tokenizer('basic_english')  # lower-cases and splits on whitespace/punctuation

def yield_tokens(data_iter):
    for _, text in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])  # out-of-vocabulary tokens map to '<unk>'
vocab(['here', 'is', 'an', 'example'])
text_pipeline = lambda x: vocab(tokenizer(x))   # raw string -> list of token ids
label_pipeline = lambda x: int(x) - 1           # AG News labels are 1-4; shift to 0-3
text_pipeline('here is the an example')
label_pipeline('10')
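As a quick sanity check, the ids produced by text_pipeline can be mapped back to tokens with the vocabulary's lookup_tokens method (a minimal sketch; the concrete ids depend on the vocabulary built above):

ids = text_pipeline('here is the an example')   # list of token ids
print(ids)
print(vocab.lookup_tokens(ids))                 # ['here', 'is', 'the', 'an', 'example']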
Generate data batches and iterators
from torch.utils.data import DataLoader

def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for (_label, _text) in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list = torch.cat(text_list)                    # concatenate all samples into one 1-D tensor
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)  # start position of each sample
    return label_list.to(device), text_list.to(device), offsets.to(device)

dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
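The offsets tensor is what lets nn.EmbeddingBag (used in the model below) handle variable-length texts without padding: all token ids in a batch are concatenated into a single 1-D tensor, and offsets[i] records where sample i begins. A self-contained toy sketch (arbitrary ids, not from the real vocabulary):

# Batch of 3 toy "texts" with lengths 3, 2 and 4
toy_text = torch.tensor([4, 1, 7, 2, 5, 9, 9, 3, 6])       # all ids concatenated
toy_offsets = torch.tensor([0, 3, 5])                      # start index of each sample
bag = nn.EmbeddingBag(num_embeddings=10, embedding_dim=4)  # default mode='mean'
print(bag(toy_text, toy_offsets).shape)                    # torch.Size([3, 4]): one vector per sample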
Prepare the model
Define the model
from torch import nn

class TextClassificationModel(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextClassificationModel, self).__init__()
        # EmbeddingBag embeds each token and mean-pools per sample (default mode='mean')
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, text, offsets):
        embedded = self.embedding(text, offsets)
        return self.fc(embedded)
Instantiate the model
train_iter = AG_NEWS(split='train')  # recreate the iterator; the earlier one may have been exhausted while building the vocab
num_class = len(set([label for (label, text) in train_iter]))
vocab_size = len(vocab)
em_size = 64
model = TextClassificationModel(vocab_size, em_size, num_class).to(device)
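Before training, it is worth running one fake batch through the model to confirm the output shape (a hedged sketch; the ids and offsets below are arbitrary):

dummy_text = torch.tensor([1, 2, 3, 4, 5], dtype=torch.int64).to(device)  # two fake samples, lengths 3 and 2
dummy_offsets = torch.tensor([0, 3], dtype=torch.int64).to(device)
print(model(dummy_text, dummy_offsets).shape)   # torch.Size([2, 4]): one logit per class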
Define the training and evaluation functions
import time

def train(dataloader):
    model.train()  # switch to training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 500
    start_time = time.time()

    for idx, (label, text, offsets) in enumerate(dataloader):
        predicted_label = model(text, offsets)
        optimizer.zero_grad()                     # reset gradients
        loss = criterion(predicted_label, label)  # compute the loss
        loss.backward()                           # backpropagate
        optimizer.step()                          # update parameters

        # accumulate running statistics
        total_acc += (predicted_label.argmax(1) == label).sum().item()
        train_loss += loss.item()
        total_count += label.size(0)

        if idx % log_interval == 0 and idx > 0:
            print('| epoch {:1d} | {:4d}/{:4d} batches'
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(epoch, idx, len(dataloader),
                                                                  total_acc/total_count, train_loss/total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()

def evaluate(dataloader):
    model.eval()  # switch to evaluation mode
    total_acc, total_loss, total_count = 0, 0, 0

    with torch.no_grad():
        for idx, (label, text, offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            loss = criterion(predicted_label, label)
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            total_loss += loss.item()
            total_count += label.size(0)
    return total_acc/total_count, total_loss/total_count
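One caveat: with a learning rate as large as 5, plain SGD can occasionally produce exploding updates. The upstream torchtext tutorial guards against this with a gradient-clipping line between loss.backward() and optimizer.step(); if training ever diverges, it can be added inside train():

torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)  # optional: cap the gradient norm at 0.1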
Train the model
Split the dataset and run the model
from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset

EPOCHS = 10      # number of training epochs
LR = 5           # learning rate (deliberately large, paired with an aggressive scheduler)
BATCH_SIZE = 64

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)  # decay LR by 10x when stepped
total_accu = None

train_iter, test_iter = AG_NEWS()                  # fresh train/test iterators
train_dataset = to_map_style_dataset(train_iter)   # map-style wrapper so random_split can index it
test_dataset = to_map_style_dataset(test_iter)

num_train = int(len(train_dataset) * 0.95)         # 95% train / 5% validation
split_train_, split_valid_ = random_split(train_dataset, [num_train, len(train_dataset) - num_train])

train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)
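to_map_style_dataset wraps the iterable-style datasets so that random_split, which needs len() and indexing, can work on them. AG News ships 120,000 training and 7,600 test samples, so the 95% split leaves 114,000 training samples, i.e. 114000 / 64 ≈ 1782 batches per epoch, which matches the log below. A quick check:

print(len(train_dataset), len(test_dataset))  # 120000 7600
print(len(split_train_), len(split_valid_))   # 114000 6000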
for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader)
    val_acc, val_loss = evaluate(valid_dataloader)

    # decay the learning rate whenever validation accuracy stops improving
    if total_accu is not None and total_accu > val_acc:
        scheduler.step()
    else:
        total_accu = val_acc

    print('-' * 69)
    print('|epoch {:1d} | time: {:4.2f}s |'
          'valid_acc {:4.3f} valid_loss {:4.3f}'.format(epoch, time.time() - epoch_start_time, val_acc, val_loss))
    print('-' * 69)
| epoch 1 | 500/1782 batches| train_acc 0.914 train_loss 0.00397
| epoch 1 | 1000/1782 batches| train_acc 0.917 train_loss 0.00385
| epoch 1 | 1500/1782 batches| train_acc 0.913 train_loss 0.00402
---------------------------------------------------------------------
|epoch 1 | time: 9.01s |valid_acc 0.920 valid_loss 0.004
---------------------------------------------------------------------
| epoch 2 | 500/1782 batches| train_acc 0.924 train_loss 0.00356
| epoch 2 | 1000/1782 batches| train_acc 0.925 train_loss 0.00346
| epoch 2 | 1500/1782 batches| train_acc 0.923 train_loss 0.00349
---------------------------------------------------------------------
|epoch 2 | time: 10.16s |valid_acc 0.913 valid_loss 0.004
---------------------------------------------------------------------
| epoch 3 | 500/1782 batches| train_acc 0.941 train_loss 0.00284
| epoch 3 | 1000/1782 batches| train_acc 0.945 train_loss 0.00271
| epoch 3 | 1500/1782 batches| train_acc 0.943 train_loss 0.00273
---------------------------------------------------------------------
|epoch 3 | time: 8.85s |valid_acc 0.924 valid_loss 0.004
---------------------------------------------------------------------
| epoch 4 | 500/1782 batches| train_acc 0.945 train_loss 0.00268
| epoch 4 | 1000/1782 batches| train_acc 0.945 train_loss 0.00267
| epoch 4 | 1500/1782 batches| train_acc 0.946 train_loss 0.00265
---------------------------------------------------------------------
|epoch 4 | time: 8.88s |valid_acc 0.925 valid_loss 0.004
---------------------------------------------------------------------
| epoch 5 | 500/1782 batches| train_acc 0.945 train_loss 0.00269
| epoch 5 | 1000/1782 batches| train_acc 0.948 train_loss 0.00257
| epoch 5 | 1500/1782 batches| train_acc 0.945 train_loss 0.00265
---------------------------------------------------------------------
|epoch 5 | time: 9.23s |valid_acc 0.922 valid_loss 0.004
---------------------------------------------------------------------
| epoch 6 | 500/1782 batches| train_acc 0.948 train_loss 0.00257
| epoch 6 | 1000/1782 batches| train_acc 0.950 train_loss 0.00249
| epoch 6 | 1500/1782 batches| train_acc 0.947 train_loss 0.00259
---------------------------------------------------------------------
|epoch 6 | time: 9.30s |valid_acc 0.925 valid_loss 0.004
---------------------------------------------------------------------
| epoch 7 | 500/1782 batches| train_acc 0.949 train_loss 0.00251
| epoch 7 | 1000/1782 batches| train_acc 0.946 train_loss 0.00264
| epoch 7 | 1500/1782 batches| train_acc 0.950 train_loss 0.00245
---------------------------------------------------------------------
|epoch 7 | time: 8.93s |valid_acc 0.925 valid_loss 0.004
---------------------------------------------------------------------
| epoch 8 | 500/1782 batches| train_acc 0.949 train_loss 0.00251
| epoch 8 | 1000/1782 batches| train_acc 0.946 train_loss 0.00260
| epoch 8 | 1500/1782 batches| train_acc 0.950 train_loss 0.00249
---------------------------------------------------------------------
|epoch 8 | time: 8.79s |valid_acc 0.925 valid_loss 0.004
---------------------------------------------------------------------
| epoch 9 | 500/1782 batches| train_acc 0.947 train_loss 0.00254
| epoch 9 | 1000/1782 batches| train_acc 0.950 train_loss 0.00250
| epoch 9 | 1500/1782 batches| train_acc 0.948 train_loss 0.00258
---------------------------------------------------------------------
|epoch 9 | time: 8.84s |valid_acc 0.925 valid_loss 0.004
---------------------------------------------------------------------
| epoch 10 | 500/1782 batches| train_acc 0.949 train_loss 0.00248
| epoch 10 | 1000/1782 batches| train_acc 0.947 train_loss 0.00256
| epoch 10 | 1500/1782 batches| train_acc 0.951 train_loss 0.00249
---------------------------------------------------------------------
|epoch 10 | time: 9.80s |valid_acc 0.925 valid_loss 0.004
---------------------------------------------------------------------
Evaluate the model on the test data
print('Checking the results of test dataset.')
test_acc, test_loss = evaluate(test_dataloader)
print('test accuracy {:8.3f}'.format(test_acc))
Checking the results of test dataset.
test accuracy 0.909
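With training done, the model can classify a raw headline end-to-end; a minimal sketch reusing the same pipelines (the class names follow the standard AG News label mapping):

ag_news_label = {1: 'World', 2: 'Sports', 3: 'Business', 4: 'Sci/Tech'}

def predict(text):
    with torch.no_grad():
        ids = torch.tensor(text_pipeline(text), dtype=torch.int64).to(device)
        offsets = torch.tensor([0]).to(device)            # a single sample starting at position 0
        return model(ids, offsets).argmax(1).item() + 1   # shift back to the 1-based labels

print(ag_news_label[predict('NBA playoffs: the finals start next week')])  # likely 'Sports'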
Summary
- The typical text-classification workflow:
  - Prepare the raw text: AG News is a dataset widely used for text classification
  - Clean the text: AG News comes already cleaned
  - Tokenize: torchtext's get_tokenizer() returns a tokenizer function that converts a string into a list of words
  - Vectorize the text: essentially last week's word-embedding step; here nn.EmbeddingBag maps discrete words to fixed-size continuous vectors, which capture semantic relationships between words reasonably well
  - Build the model: the TextClassificationModel defined here first embeds the text and then mean-pools the embedded vectors
Original post: https://blog.csdn.net/a536723241/article/details/143393096