How to Develop a 1D Generative Adversarial Network From Scratch in PyTorch (Part 2)

Goal

This post is the second part of "How to develop a 1D GAN from scratch in PyTorch", inspired by the blog post "Machine Learning Mastery - How to Develop a 1D Generative Adversarial Network From Scratch in Keras" by Jason Brownlee, PhD. To make the steps easy to follow, I work through the same concepts in PyTorch.

This post will cover the following:

Part 2:

  • Train a Discriminator model

Reference

  • Machine Learning Mastery - How to Develop a 1D Generative Adversarial Network From Scratch in Keras (Jason Brownlee, PhD)

Libraries

In [94]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

# PyTorch
import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
from torchviz import make_dot

Functions

Functions defined in Part 1

In [91]:
# Target function
def f(x):
    return x ** 2


# Define the standalone discriminator model:
# a small feed-forward (MLP) binary classifier
def define_discriminator(n_inputs=2):
    # n_inputs -> 25 -> 1, ending in a sigmoid so the model
    # emits the probability that a sample is real
    model = nn.Sequential(nn.Linear(n_inputs, 25),
                          nn.ReLU(),
                          nn.Linear(25, 1),
                          nn.Sigmoid(),
                          )

    # Loss: binary cross-entropy between the predicted
    # probability and the 0/1 (fake/real) label
    criterion = nn.BCELoss()

    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    return model, criterion, optimizer


# Generate samples
def generate_samples(size=100, label='real'):
    """Generate `size` 2D samples labeled real (1) or fake (0).

    Real samples lie on the target curve (x, f(x)); fake samples
    get a random second coordinate, so they fall off the curve.
    """
    x = np.random.randn(size, 1)
    if label == 'real':
        x2 = f(x)
    else:
        x2 = np.random.randn(size, 1)

    y = np.ones((size, 1)) * (label == 'real')
    return np.hstack([x, x2]), y

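Before training, it helps to sanity-check the pieces: draw a half batch of real and fake samples, visualize them, and push one batch through the still-untrained discriminator. A minimal sketch, assuming the definitions above:

In [ ]:
# Sanity check: visualize the samples and run one forward pass
X_real, y_real = generate_samples(50, label='real')
X_fake, y_fake = generate_samples(50, label='fake')

# real points lie on the parabola; fake points scatter off it
plt.scatter(X_real[:, 0], X_real[:, 1], label='real')
plt.scatter(X_fake[:, 0], X_fake[:, 1], label='fake')
plt.legend()
plt.show()

model, criterion, optimizer = define_discriminator()
with torch.no_grad():
    probs = model(torch.Tensor(X_real))
print(probs[:5])  # untrained model: probabilities carry no signal yet
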
Train a discriminator

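The discriminator is trained like any binary classifier. Each epoch we draw a half batch of real samples (labeled 1) and a half batch of fake samples (labeled 0), run a forward pass, and minimize the binary cross-entropy between the predicted probability p and the label y:

    loss = -[y * log(p) + (1 - y) * log(1 - p)]

This per-sample loss, averaged over the batch, is exactly what nn.BCELoss computes, and it is why the model ends in a sigmoid: BCELoss expects probabilities in (0, 1), not raw logits.
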
In [101]:
# Parameters
n_batch = 100
epochs = 100

half_batch = int(n_batch / 2)
labels = ['real', 'fake']
d_X = {}
d_y = {}
d_accuracy = {}
model, criterion, optimizer = define_discriminator()

# run epochs manually
for i in range(epochs):
    running_loss = 0.0

    for label in labels:
        # generate a half batch of real or fake examples
        d_X[label], d_y[label] = generate_samples(half_batch, label=label)
        X = torch.Tensor(d_X[label])
        y = torch.Tensor(d_y[label])

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = model(X)            # probabilities in (0, 1)
        loss = criterion(outputs, y)  # BCE against the 0/1 labels
        loss.backward()
        optimizer.step()

        # statistics: threshold at 0.5 to get hard predictions
        predicted = (outputs > 0.5).float()
        d_accuracy[label] = (predicted == y).float().mean().item()
        running_loss += loss.item()

    # print statistics
    if i % 10 == 0:
        print(i, running_loss, d_accuracy['real'], d_accuracy['fake'])
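
As a follow-up, we can check that the discriminator has actually learned to separate the two classes by evaluating it on fresh batches it has never seen (the PyTorch analogue of Keras's model.evaluate). A minimal sketch, assuming the trained model above:

In [ ]:
# Evaluate the trained discriminator on fresh samples
model.eval()
with torch.no_grad():
    for label in ['real', 'fake']:
        X, y = generate_samples(100, label=label)
        probs = model(torch.Tensor(X))
        acc = ((probs > 0.5).float() == torch.Tensor(y)).float().mean().item()
        print(f'{label} accuracy: {acc:.2f}')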
