

PyTorch: how to train an LSTM network with batches


Batched LSTM training: the example below reads a word/tag corpus, keeps fixed-length (100-token) sentences, and feeds them to an LSTM sequence tagger through a DataLoader in mini-batches.
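
The parsing loop below expects one space-separated "token tag" pair per line, with any other line (for example a blank line) ending the current sentence; only sentences of exactly 100 tokens that contain at least one I-PRO tag are kept. A hypothetical fragment of such a file (placeholder tokens, not from the original data):

token1 O
token2 I-PRO
token3 I-PRO
...
(blank line between sentences)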

# Import the required packages
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as Data 
torch.manual_seed(1) 
 
# Data preparation stage
def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)

# Read the corpus: one space-separated "token tag" pair per line; any other line ends the current sentence.
# Only sentences of exactly 100 tokens containing at least one I-PRO tag are kept.
with open("/home/lstm_train.txt", encoding='utf8') as f:
    train_data = []
    word = []
    label = []
    data = f.readline().strip()
    while data:
        data = data.strip()
        SP = data.split(' ')
        if len(SP) == 2:
            word.append(SP[0])
            label.append(SP[1])
        else:
            if len(word) == 100 and 'I-PRO' in label:
                train_data.append((word, label))
            word = []
            label = []
        data = f.readline()

# Build the vocabulary: map each token to an integer index
word_to_ix = {}
for sent, _ in train_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)

# Map tags to indices and convert every sentence to (token indices, tag indices)
tag_to_ix = {"O": 0, "I-PRO": 1}
for i in range(len(train_data)):
    train_data[i] = ([word_to_ix[t] for t in train_data[i][0]], [tag_to_ix[t] for t in train_data[i][1]])
 
# Dimension of the word embeddings
EMBEDDING_DIM = 128

# Number of units in the hidden layer
HIDDEN_DIM = 128

# Batch size
batch_size = 10

class LSTMTagger(nn.Module):

    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size, batch_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)

        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim. batch_first=True makes the expected
        # input shape (batch, seq_len, embedding_dim).
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)

        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)

    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)    # (batch, seq_len, embedding_dim)
        lstm_out, _ = self.lstm(embeds)            # (batch, seq_len, hidden_dim)
        tag_space = self.hidden2tag(lstm_out)      # (batch, seq_len, tagset_size)
        scores = F.log_softmax(tag_space, dim=2)
        return scores

    def predict(self, sentence):
        # Same computation as forward; kept as an explicit inference entry point.
        embeds = self.word_embeddings(sentence)
        lstm_out, _ = self.lstm(embeds)
        tag_space = self.hidden2tag(lstm_out)
        scores = F.log_softmax(tag_space, dim=2)
        return scores
 
loss_function = nn.NLLLoss()
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix), batch_size)
optimizer = optim.SGD(model.parameters(), lr=0.1)
 
# Wrap the index lists in a TensorDataset; every sentence has length 100,
# so they stack into rectangular tensors of shape (num_sentences, 100).
data_set_word = []
data_set_label = []
for data_tuple in train_data:
    data_set_word.append(data_tuple[0])
    data_set_label.append(data_tuple[1])
torch_dataset = Data.TensorDataset(torch.tensor(data_set_word, dtype=torch.long), torch.tensor(data_set_label, dtype=torch.long))
# Put the dataset into a DataLoader
loader = Data.DataLoader(
    dataset=torch_dataset,   # torch TensorDataset format
    batch_size=batch_size,   # mini-batch size
    shuffle=True,            # shuffle sentences every epoch
    num_workers=2,           # read the data with multiple worker processes
)
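
# Optional sanity check (an addition, not part of the original article): peek at
# one batch to confirm the shapes the training loop below receives --
# batch_x holds token indices of shape (batch_size, 100) and batch_y holds
# tag indices of the same shape.
for batch_x, batch_y in loader:
    print(batch_x.shape, batch_y.shape)   # torch.Size([10, 100]) torch.Size([10, 100])
    break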
 
# Training loop
for epoch in range(200):
    for step, (batch_x, batch_y) in enumerate(loader):
        # Zero the gradients
        model.zero_grad()
        tag_scores = model(batch_x)

        # Compute the loss: flatten the (batch, seq_len, tagset_size) scores and
        # the (batch, seq_len) targets so NLLLoss sees one prediction per token
        tag_scores = tag_scores.view(-1, tag_scores.shape[2])
        batch_y = batch_y.view(batch_y.shape[0] * batch_y.shape[1])
        loss = loss_function(tag_scores, batch_y)
        print(loss)
        # Backward pass
        loss.backward()

        # Update the parameters
        optimizer.step()
 
# Testing: run the first training sentence through the model
with torch.no_grad():
    inputs = torch.tensor([data_set_word[0]], dtype=torch.long)
    print(inputs)
    tag_scores = model.predict(inputs)
    print(tag_scores.shape)                 # (1, 100, 2)
    print(torch.argmax(tag_scores, dim=2))  # predicted tag index per token
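
The argmax above is a tensor of tag indices. A minimal follow-up sketch (not in the original code) that maps those indices back to tag names by inverting tag_to_ix:

ix_to_tag = {v: k for k, v in tag_to_ix.items()}
pred_ids = torch.argmax(tag_scores, dim=2)[0]       # tag indices for the first sentence
pred_tags = [ix_to_tag[int(i)] for i in pred_ids]   # e.g. ['O', 'O', 'I-PRO', ...]
print(pred_tags)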

Supplement: PyTorch basics - recognizing the handwritten-digit (MNIST) dataset with an LSTM network

Let's go straight to the code. The idea is to treat each 28x28 image as a sequence of 28 rows, so the LSTM reads one 28-dimensional row per time step.

import numpy as np
import torch
from torch import nn, optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Training set
train_data = datasets.MNIST(root="./",                       # where to store the data
                            train=True,                       # load the training split
                            transform=transforms.ToTensor(),  # convert the data to tensors
                            download=True)                    # download if not present
# Test set
test_data = datasets.MNIST(root="./",
                           train=False,
                           transform=transforms.ToTensor(),
                           download=True)
# Batch size
batch_size = 64
# Load the training set
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
# Load the test set
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)
# Peek at the shapes of one batch
for i, data in enumerate(train_loader):
    inputs, labels = data
    print(inputs.shape)   # torch.Size([64, 1, 28, 28])
    print(labels.shape)   # torch.Size([64])
    break
# Define the network structure
class LSTM(nn.Module):
    def __init__(self):
        super(LSTM, self).__init__()  # initialize the parent class
        self.lstm = torch.nn.LSTM(
            input_size=28,    # size of each input feature vector (one image row)
            hidden_size=64,   # number of features in the LSTM hidden state
            num_layers=1,     # number of stacked LSTM layers
            batch_first=True  # by default the LSTM expects (seq_len, batch, feature);
                              # batch_first=True makes input and output (batch, seq_len, feature)
        )
        # CrossEntropyLoss applies log-softmax internally, so the model
        # returns raw class scores (logits) from this linear layer.
        self.out = torch.nn.Linear(in_features=64, out_features=10)

    def forward(self, x):
        # reshape to (batch, seq_len, feature): each image becomes 28 rows of 28 pixels
        x = x.view(-1, 28, 28)
        # output: (batch, seq_len, hidden_size), the hidden state at every time step
        # even with batch_first=True, dimension 0 of h_n and c_n is still num_layers
        # h_n: [num_layers, batch, hidden_size], the hidden state of the last time step
        # c_n: [num_layers, batch, hidden_size], the cell state of the last time step
        output, (h_n, c_n) = self.lstm(x)
        output_in_last_timestep = h_n[-1, :, :]
        x = self.out(output_in_last_timestep)
        return x
# Create the model
model = LSTM()
# Loss function: cross-entropy over the 10 digit classes
ce_loss = nn.CrossEntropyLoss()
# Optimizer: Adam
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Define the training and testing procedures
def train():
    # put the model in training mode
    model.train()
    for i, data in enumerate(train_loader):
        # get one batch of data and labels
        inputs, labels = data
        # get the model predictions, shape (64, 10)
        out = model(inputs)
        # cross-entropy expects out of shape (batch, num_classes) and labels of shape (batch)
        loss = ce_loss(out, labels)
        # zero the gradients
        optimizer.zero_grad()
        # compute the gradients
        loss.backward()
        # update the weights
        optimizer.step()
  
def test():
    # put the model in evaluation mode
    model.eval()
    correct = 0  # number of correct predictions on the test set
    with torch.no_grad():  # no gradients are needed for evaluation
        for i, data in enumerate(test_loader):
            # get one batch of data and labels
            inputs, labels = data
            # get the model predictions, shape (64, 10)
            out = model(inputs)
            # get the index of the highest score, i.e. the predicted digit
            _, predicted = torch.max(out, 1)
            # count correct predictions
            correct += (predicted == labels).sum()
        print("Test acc:{0}".format(correct.item() / len(test_data)))

        correct = 0
        for i, data in enumerate(train_loader):  # accuracy on the training set
            inputs, labels = data
            out = model(inputs)
            _, predicted = torch.max(out, 1)
            correct += (predicted == labels).sum()
        print("Train acc:{0}".format(correct.item() / len(train_data)))

# Train for 10 epochs, evaluating after each one
for epoch in range(10):
    print("epoch:", epoch)
    train()
    test()
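
After training, a single image can be classified the same way. A minimal sketch (not in the original article), reusing the model and test_data defined above:

with torch.no_grad():
    img, label = test_data[0]            # img: tensor of shape (1, 28, 28), label: int
    out = model(img.unsqueeze(0))        # add a batch dimension -> scores of shape (1, 10)
    pred = torch.argmax(out, dim=1).item()
    print("predicted:", pred, "actual:", label)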

The above is based on my personal experience; I hope it can serve as a useful reference.
