Two ways to enable gradient tracking on a tensor:

```python
import torch

# Method 1: request gradient tracking at creation time
x = torch.randn(3, 4, requires_grad=True)

# Method 2: create the tensor first, then switch tracking on
x = torch.randn(3, 4)
x.requires_grad = True
```
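As a quick sanity check (a minimal sketch, not part of the original notes): both methods produce a leaf tensor, and its `.grad` field is populated after a backward pass:

```python
import torch

x = torch.randn(3, 4, requires_grad=True)
loss = (x ** 2).sum()    # a scalar, so backward() needs no gradient argument
loss.backward()

print(x.is_leaf)                               # True: created by the user, not by an op
print(torch.allclose(x.grad, 2 * x.detach()))  # True: d(sum(x^2))/dx = 2x
```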
A minimal autograd example: build z = x*w + b, where only w and b track gradients, then backpropagate.

```python
import torch

x = torch.rand(1)
b = torch.rand(1, requires_grad=True)
w = torch.rand(1, requires_grad=True)
y = x * w
z = y + b

# Backward pass: retain_graph=True keeps the graph alive so backward() can run again.
# Gradients accumulate automatically across calls unless they are cleared first.
z.backward(retain_graph=True)
w.grad  # dz/dw = x
b.grad  # dz/db = tensor([1.])
```
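To make the comment about clearing gradients concrete, here is a small sketch (an addition, not from the original notes) showing that a second `backward()` call adds to `w.grad` rather than replacing it, and that zeroing restores a single-pass gradient:

```python
import torch

x = torch.rand(1)
w = torch.rand(1, requires_grad=True)
b = torch.rand(1, requires_grad=True)
z = x * w + b

z.backward(retain_graph=True)
first = w.grad.clone()           # dz/dw = x

z.backward(retain_graph=True)    # gradients are *added* to w.grad, not replaced
print(torch.allclose(w.grad, 2 * first))  # True: accumulated over two passes

w.grad.zero_()                   # manual reset; optimizer.zero_grad() does this per parameter
b.grad.zero_()
z.backward()
print(torch.allclose(w.grad, first))      # True: back to a single-pass gradient
```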
Trying out linear regression.

```python
import torch
import torch.nn as nn
import numpy as np

# Build a toy dataset: inputs 0..10 with labels y = 2x + 1
x_val = [i for i in range(11)]
x_train = np.array(x_val, dtype=np.float32).reshape(-1, 1)
x_train.shape  # (11, 1)

y_val = [2 * i + 1 for i in x_val]
y_train = np.array(y_val, dtype=np.float32).reshape(-1, 1)
y_train.shape  # (11, 1)

# Linear regression model.
# Linear regression is just a fully connected layer with no activation function.
class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = self.linear(x)
        return out

input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

# Hyperparameters and loss
epochs = 1000
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
criterion = nn.MSELoss()  # cross-entropy for classification, MSE for regression (in general)

# Train the model
for epoch in range(epochs):
    epoch += 1  # 1-based epoch count for printing
    # Convert the numpy arrays to tensors
    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)

    # Gradients must be zeroed on every iteration
    optimizer.zero_grad()

    # Forward pass
    outputs = model(inputs)

    # Compute the loss
    loss = criterion(outputs, labels)

    # Backward pass
    loss.backward()

    # Update the weights
    optimizer.step()

    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))

# Predict with the trained model (no gradients needed for inference)
with torch.no_grad():
    predicted = model(torch.from_numpy(x_train)).numpy()

# Save and reload the model weights (the state_dict only, not the full object)
torch.save(model.state_dict(), 'model.pkl')
model.load_state_dict(torch.load('model.pkl'))
```

GPU training. The only changes are moving the model and the data to the same device:

```python
import torch
import torch.nn as nn
import numpy as np

class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        out = self.linear(x)
        return out

input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)

# Pick the GPU when available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)

criterion = nn.MSELoss()
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

epochs = 1000
for epoch in range(epochs):
    epoch += 1
    # x_train / y_train are the numpy arrays built in the previous section;
    # the input tensors must live on the same device as the model
    inputs = torch.from_numpy(x_train).to(device)
    labels = torch.from_numpy(y_train).to(device)

    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
```
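Since the labels were generated as y = 2x + 1, one way to verify training (a sketch added here, not part of the original notes) is to read the learned weight and bias off `model.linear`; they should land near 2 and 1:

```python
import torch

# Assumes `model` has been trained as above; works for both the CPU and GPU variants
device = next(model.parameters()).device

w = model.linear.weight.item()
b = model.linear.bias.item()
print(f"learned: y = {w:.3f} * x + {b:.3f}")  # expect roughly y = 2.000 * x + 1.000

# Prediction for an unseen input, e.g. x = 20 -> expect about 41
x_new = torch.tensor([[20.0]], device=device)
with torch.no_grad():
    print(model(x_new).item())
```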
Source: (强推)Pytorch深度学习实战教学_哔哩哔哩_bilibili ("(Highly recommended) Hands-on PyTorch deep learning tutorial" on Bilibili).