How to Develop a 1D Generative Adversarial Network From Scratch in PyTorch (Part 2)
Goal¶
This post is the 2nd part of "How to develop a 1D GAN from scratch in PyTorch", inspired by the blog post "Machine Learning Mastery - How to Develop a 1D Generative Adversarial Network From Scratch in Keras" by Jason Brownlee, PhD. To learn step by step, I will walk through the same concept in PyTorch.
This post will cover the following:
Part 2:
- Train a Discriminator model
- Reference
Libraries¶
In [94]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# PyTorch
import torch
from torch import nn
import torch.nn.functional as F
from torch import optim
from torchviz import make_dot
Functions¶
Functions defined in Part 1¶
In [91]:
# Target function
def f(x):
    return x ** 2
# Define the standalone discriminator model:
# a feed-forward network with a single sigmoid output,
# as required by BCELoss
def define_discriminator(n_inputs=2):
    model = nn.Sequential(nn.Linear(n_inputs, 25),
                          nn.ReLU(),
                          nn.Linear(25, 1),
                          nn.Sigmoid(),
                          )
    # Loss
    criterion = nn.BCELoss()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    return model, criterion, optimizer
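Since torchviz is already imported above, we can optionally sanity-check the architecture by drawing its computation graph with make_dot. This is a minimal sketch I added for illustration; the dummy batch below is an assumption, not part of the original Part 1 code.
In [ ]:
# Optional: visualize the discriminator's computation graph.
# The dummy input is an illustrative assumption (4 samples, 2 features).
model, criterion, optimizer = define_discriminator()
dummy = torch.randn(4, 2)
make_dot(model(dummy), params=dict(model.named_parameters()))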
# Generate samples
def generate_samples(size=100, label='real'):
    """Generate samples with a real (on the x^2 curve) or
    fake (random, off the curve) label
    """
    if label == 'real':
        x = np.random.randn(size, 1)
        x2 = f(x)
    else:
        # fake samples are random points that do not follow f(x),
        # as in the original Keras blog post
        x = np.random.rand(size, 1) * 2 - 1
        x2 = np.random.rand(size, 1) * 2 - 1
    y = np.ones((size, 1)) * (label == 'real')
    return np.hstack([x, x2]), y
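As a quick sanity check (my addition, not in the original post), we can generate a few samples of each kind and confirm the shapes and labels:
In [ ]:
# Illustrative check of generate_samples: shapes and labels
X_real, y_real = generate_samples(5, label='real')
X_fake, y_fake = generate_samples(5, label='fake')
print(X_real.shape, y_real.ravel())  # (5, 2) [1. 1. 1. 1. 1.]
print(X_fake.shape, y_fake.ravel())  # (5, 2) [0. 0. 0. 0. 0.]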
Train a discriminator¶
In [101]:
# Parameters
n_batch = 100
epochs = 100
half_batch = int(n_batch / 2)
labels = ['real', 'fake']
d_X = {}
d_y = {}
d_accuracy = {'real': [], 'fake': []}

model, criterion, optimizer = define_discriminator()

# run epochs manually
for i in range(epochs):
    for label in labels:
        # generate half a batch of real or fake examples
        d_X[label], d_y[label] = generate_samples(half_batch, label=label)
        X = torch.Tensor(d_X[label])
        y = torch.Tensor(d_y[label])
        # update the model (the PyTorch equivalent of Keras' train_on_batch)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = model(X)
        loss = criterion(outputs, y)
        loss.backward()
        optimizer.step()
        # evaluate: fraction of correct predictions at a 0.5 threshold
        acc = ((outputs > 0.5).float() == y).float().mean().item()
        d_accuracy[label].append(acc)
    # print statistics every 10 epochs
    if i % 10 == 0:
        print(i, d_accuracy['real'][-1], d_accuracy['fake'][-1])
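To see whether the discriminator actually learns to separate the two distributions, we can plot the accuracy history collected above. This plotting cell is an illustrative addition, assuming the d_accuracy dictionary filled during training:
In [ ]:
# Plot discriminator accuracy per label over training (illustrative addition)
plt.plot(d_accuracy['real'], label='real')
plt.plot(d_accuracy['fake'], label='fake')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.title('Discriminator accuracy during training');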