import torch
import numpy as np
import random


class LinearRegression(torch.nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        # A one-variable linear model: input and output are both 1-dimensional
        self.linear = torch.nn.Linear(in_features=1, out_features=1)
    def forward(self, x):
        return self.linear(x)


def generate_data(batch_size=32):
    # Draw inputs from a standard normal distribution
    x = torch.randn(batch_size)
    # Target function: y = 3x + 4, plus a small random integer offset as noise
    y = 3.0 * x + 4.0 + random.randint(-1, 1)
    return (x, y)
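
# Note: the noise term above is a single random integer in {-1, 0, 1} shared by the
# whole batch. A per-sample alternative (a sketch, e.g. Gaussian noise) would be
# y = 3.0 * x + 4.0 + 0.5 * torch.randn(batch_size).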


def train(epoch=500):
    # Use CUDA if it is available
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = LinearRegression().to(device)
    loss_func = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

    for i in range(epoch):
        x, y = generate_data()
        # Reshape to (batch_size, 1), the shape nn.Linear expects
        x = x.reshape(-1, 1)
        y = y.reshape(-1, 1)
        # Move the tensors to the chosen device
        x = x.to(device)
        y = y.to(device)
        # Standard PyTorch training step: forward, loss, backward, update
        predicted = model(x)
        loss = loss_func(predicted, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print(f'Epoch {i+1}, loss {loss.item()}')
    # Final learned parameters
    print(f'Weight: {model.linear.weight.data}')
    print(f'Bias: {model.linear.bias.data}')

if __name__ == '__main__':
    train(1000)
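
# Sanity check (a sketch): with the target function y = 3x + 4 in generate_data(),
# the printed weight should converge to roughly 3.0 and the bias to roughly 4.0.
# A hypothetical usage, assuming train() were modified to return `model`:
#
#     model = train(1000)
#     with torch.no_grad():
#         x_new = torch.tensor([[2.0]], device=next(model.parameters()).device)
#         print(model(x_new))  # expected to be close to 3 * 2 + 4 = 10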