Building a basic neural network with PyTorch

The following is based on the PyTorch website tutorials, so don't worry too much about the Python details.

Basic steps

  1. Load the data: training set, targets, and dimensions
  2. Build the network, i.e. a class Net
  3. Instantiate the net
  4. Create the optimizer
  5. Choose the loss function
  6. Start training (a minimal sketch follows this list)
    • Calculate the predicted values
    • Calculate the loss value
    • Clear old gradients with optimizer.zero_grad()
    • Backpropagate with loss.backward()
    • Update the parameters with optimizer.step()
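
For orientation, here is a minimal, self-contained sketch of the six steps on a toy linear fit (the data y = 2x and all names here are illustrative, not part of the script below):

import torch

# toy data: y = 2x (step 1)
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = 2 * x

net = torch.nn.Linear(1, 1)                            # steps 2-3: build and instantiate
optimizer = torch.optim.SGD(net.parameters(), lr=0.1)  # step 4: create optimizer
loss_func = torch.nn.MSELoss()                         # step 5: loss function

for t in range(100):                                   # step 6: training loop
    prediction = net(x)              # calculate predicted values
    loss = loss_func(prediction, y)  # calculate loss
    optimizer.zero_grad()            # clear old gradients
    loss.backward()                  # backpropagate
    optimizer.step()                 # update parameters

The full script below follows the same pattern, comparing four optimizers on a quadratic fit:
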
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.utils.data as Data
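# fix the RNG seed so runs are reproducible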
torch.manual_seed(1)

# fake data: y = 2.863 * x^2 + 5.652 * x + uniform noise scaled by 3.423
x = torch.unsqueeze(torch.linspace(-5, 5, 5000), dim=1)
y = 2.863 * x.pow(2) + 5.652 * x + 3.423 * (torch.rand(x.size()) - 0.5)
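# unsqueeze adds a feature dimension: x and y have shape (5000, 1),
# which is what nn.Linear(1, 20) expects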

# plt.scatter(x.numpy(), y.numpy())
# plt.show()

# Set up mini-batch training with a DataLoader
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=100,
    shuffle=True,
    num_workers=2,
)
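# Iterating over `loader` yields (batch_x, batch_y) pairs of shape
# (100, 1); with 5000 samples that is 50 batches per epoch, and
# shuffle=True reshuffles the samples each epoch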


# create Net
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)
        self.predict = torch.nn.Linear(20, 1)

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x
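
# The same model could also be written with torch.nn.Sequential
# (a sketch, not used below):
# net = torch.nn.Sequential(
#     torch.nn.Linear(1, 20),
#     torch.nn.ReLU(),
#     torch.nn.Linear(20, 1),
# )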


if __name__ == '__main__':
    # Instantiate network objects
    net_SGD = Net()
    net_Momentum = Net()
    net_RMSProp = Net()
    net_Adam = Net()
    nets = [net_SGD, net_Momentum, net_RMSProp, net_Adam]

    # Create optimizer
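    # All four use lr=0.01 so the loss curves are directly comparable:
    # plain SGD, SGD with momentum, RMSprop (alpha is the smoothing
    # constant for its squared-gradient average), and Adam (betas are
    # the decay rates of its moment estimates).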
    opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=0.01)
    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=0.01, momentum=0.8)
    opt_RMSProp = torch.optim.RMSprop(net_RMSProp.parameters(), lr=0.01, alpha=0.9)
    opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=0.01, betas=(0.9, 0.99))
    optimizers = [opt_SGD, opt_Momentum, opt_RMSProp, opt_Adam]

    # Create loss function
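    # MSELoss averages (prediction - target)**2 over the batch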
    loss_func = torch.nn.MSELoss()
    loss_net = [[], [], [], []]

    for epoch in range(100):
        # pass over the complete data set 100 times
        for step, (batch_x, batch_y) in enumerate(loader):
            # Take 100 samples each time for training
            for net, opt, loss_opt in zip(nets, optimizers, loss_net):
                # Four networks for training
                predict = net(batch_x)
                loss = loss_func(predict, batch_y)
                opt.zero_grad()
                loss.backward()
                opt.step()
                loss_opt.append(loss.item())  # store the loss value for plotting

    labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    for i, l_his in enumerate(loss_net):
        plt.plot(l_his, label=labels[i])
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    plt.ylim((0, 1100))
    plt.show()


    # The following is the full-batch variant (no mini-batches):
    # each update uses all 5000 samples, one update per iteration.
    # for t in range(1000):
    #     for net, opt, loss_opt in zip(nets, optimizers, loss_net):
    #         predict = net(x)
    #         loss = loss_func(predict, y)
    #         opt.zero_grad()
    #         loss.backward()
    #         opt.step()
    #         loss_opt.append(loss.item())

    # labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    # for i, l_his in enumerate(loss_net):
    #     plt.plot(l_his, label=labels[i])
    # plt.legend(loc='best')
    # plt.xlabel('Steps')
    # plt.ylabel('Loss')
    # plt.ylim((0, 1100))
    # plt.show()
