CS109B Data Science 2: Advanced Topics in Data Science

Lab 10 - Autoencoders and Variational Autoencoders

Harvard University
Spring 2019
Instructors: Mark Glickman and Pavlos Protopapas


In [1]:
# !pip install imgaug
In [2]:
## load the libraries 
import sys
import warnings
import os
import glob
warnings.filterwarnings("ignore")

import numpy as np
import pandas as pd
import cv2
from sklearn.model_selection import train_test_split

from keras.layers import *
from keras.callbacks import EarlyStopping
from keras.utils import to_categorical
from keras.models import Model, Sequential
from keras.metrics import *
from keras.optimizers import Adam, RMSprop
from scipy.stats import norm
from keras.preprocessing import image

from keras import backend as K

from imgaug import augmenters
import matplotlib.pyplot as plt
plt.gray()
Using TensorFlow backend.

Part 1: Data

Reading data

Download the data given at the following link: . Use pandas and numpy to read in the data as a matrix

In [3]:
### read dataset 
train = pd.read_csv("data/fashion-mnist_train.csv")
train_x = train[list(train.columns)[1:]].values
train_x, val_x = train_test_split(train_x, test_size=0.15)

## create train and validation datasets
train_x, val_x = train_test_split(train_x, test_size=0.15)
In [4]:
## normalize and reshape
train_x = train_x/255.
val_x = val_x/255.

train_x = train_x.reshape(-1, 28, 28, 1)
val_x = val_x.reshape(-1, 28, 28, 1)
In [5]:
train_x.shape
Out[5]:
(43350, 28, 28, 1)

Visualizing Samples

Visualize 10 images from dataset

In [6]:
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i in range(5,10):
    ax[i-5].imshow(train_x[i, :, :, 0].reshape(28, 28))

Part 2: Denoise Images using AEs

Understanding AEs

Autoencoders

Add Noise to Images

Check out imgaug docs for more info and other ways to add noise.

In [7]:
# Lets add sample noise - Salt and Pepper
noise = augmenters.SaltAndPepper(0.1)
seq_object = augmenters.Sequential([noise])

train_x_n = seq_object.augment_images(train_x * 255) / 255
val_x_n = seq_object.augment_images(val_x * 255) / 255
In [8]:
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i in range(5,10):
    ax[i-5].imshow(train_x_n[i, :, :, 0].reshape(28, 28))

Setup Encoder Neural Network

Try different number of hidden layers, nodes?

In [9]:
# input layer
input_layer = Input(shape=(28, 28, 1))

# encoding architecture
encoded_layer1 = Conv2D(64, (3, 3), activation='relu', padding='same')(input_layer)
encoded_layer1 = MaxPool2D( (2, 2), padding='same')(encoded_layer1)
encoded_layer2 = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded_layer1)
encoded_layer2 = MaxPool2D( (2, 2), padding='same')(encoded_layer2)
encoded_layer3 = Conv2D(16, (3, 3), activation='relu', padding='same')(encoded_layer2)
latent_view    = MaxPool2D( (2, 2), padding='same')(encoded_layer3)

Setup Decoder Neural Network

Try different number of hidden layers, nodes?

In [10]:
# decoding architecture
decoded_layer1 = Conv2D(16, (3, 3), activation='relu', padding='same')(latent_view)
decoded_layer1 = UpSampling2D((2, 2))(decoded_layer1)
decoded_layer2 = Conv2D(32, (3, 3), activation='relu', padding='same')(decoded_layer1)
decoded_layer2 = UpSampling2D((2, 2))(decoded_layer2)
decoded_layer3 = Conv2D(64, (3, 3), activation='relu')(decoded_layer2)
decoded_layer3 = UpSampling2D((2, 2))(decoded_layer3)
output_layer   = Conv2D(1, (3, 3), padding='same')(decoded_layer3)

Train AE

In [11]:
# compile the model
model = Model(input_layer, output_layer)
model.compile(optimizer='adam', loss='mse')
In [12]:
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 28, 28, 1)         0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 28, 28, 64)        640       
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 14, 14, 64)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 14, 14, 32)        18464     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 32)          0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 7, 7, 16)          4624      
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 4, 4, 16)          0         
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 4, 4, 16)          2320      
_________________________________________________________________
up_sampling2d_1 (UpSampling2 (None, 8, 8, 16)          0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 8, 8, 32)          4640      
_________________________________________________________________
up_sampling2d_2 (UpSampling2 (None, 16, 16, 32)        0         
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 14, 14, 64)        18496     
_________________________________________________________________
up_sampling2d_3 (UpSampling2 (None, 28, 28, 64)        0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 28, 28, 1)         577       
=================================================================
Total params: 49,761
Trainable params: 49,761
Non-trainable params: 0
_________________________________________________________________
In [13]:
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=5, mode='auto')
history = model.fit(train_x_n, train_x, epochs=20, batch_size=2048, validation_data=(val_x_n, val_x), callbacks=[early_stopping])
Train on 43350 samples, validate on 7650 samples
Epoch 1/20
43350/43350 [==============================] - 7s 160us/step - loss: 0.0945 - val_loss: 0.0563
Epoch 2/20
43350/43350 [==============================] - 2s 46us/step - loss: 0.0481 - val_loss: 0.0403
Epoch 3/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0365 - val_loss: 0.0327
Epoch 4/20
43350/43350 [==============================] - 2s 46us/step - loss: 0.0306 - val_loss: 0.0283
Epoch 5/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0270 - val_loss: 0.0253
Epoch 6/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0248 - val_loss: 0.0237
Epoch 7/20
43350/43350 [==============================] - 2s 48us/step - loss: 0.0233 - val_loss: 0.0224
Epoch 8/20
43350/43350 [==============================] - 2s 46us/step - loss: 0.0223 - val_loss: 0.0216
Epoch 9/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0216 - val_loss: 0.0209
Epoch 10/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0208 - val_loss: 0.0205
Epoch 11/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0205 - val_loss: 0.0200
Epoch 12/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0199 - val_loss: 0.0194
Epoch 13/20
43350/43350 [==============================] - 2s 48us/step - loss: 0.0201 - val_loss: 0.0197
Epoch 14/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0193 - val_loss: 0.0187
Epoch 15/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0188 - val_loss: 0.0184
Epoch 16/20
43350/43350 [==============================] - 2s 48us/step - loss: 0.0184 - val_loss: 0.0181
Epoch 17/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0182 - val_loss: 0.0178
Epoch 18/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0181 - val_loss: 0.0176
Epoch 19/20
43350/43350 [==============================] - 2s 46us/step - loss: 0.0178 - val_loss: 0.0175
Epoch 20/20
43350/43350 [==============================] - 2s 47us/step - loss: 0.0175 - val_loss: 0.0171

Visualize Intermediate Layers of AE

Visualize intermediate layers

In [14]:
# compile the model
model_2 = Model(input_layer, latent_view)
model_2.compile(optimizer='adam', loss='mse')
In [15]:
n = np.random.randint(0,len(val_x)-5)
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i,a in enumerate(range(n,n+5)):
    ax[i].imshow(val_x_n[a, :, :, 0].reshape(28, 28))
plt.show()
In [16]:
preds = model_2.predict(val_x_n[n:n+5])
preds.shape
Out[16]:
(5, 4, 4, 16)
In [17]:
f, ax = plt.subplots(16,5)
ax = ax.ravel()
f.set_size_inches(20, 40)
for j in range(16):
    for i,a in enumerate(range(n,n+5)):
        ax[j*5 + i].imshow(preds[i, :, :, j])
plt.show()

Visualize Samples reconstructed by AE

In [18]:
n = np.random.randint(0,len(val_x)-5)
In [19]:
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i,a in enumerate(range(n,n+5)):
    ax[i].imshow(val_x[a, :, :, 0].reshape(28, 28))
In [20]:
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i,a in enumerate(range(n,n+5)):
    ax[i].imshow(val_x_n[a, :, :, 0].reshape(28, 28))
In [21]:
preds = model.predict(val_x_n[n:n+5])
f, ax = plt.subplots(1,5)
f.set_size_inches(80, 40)
for i,a in enumerate(range(n,n+5)):
    ax[i].imshow(preds[i].reshape(28, 28))
plt.show()

Part 3: Exercise: Denoising noisy documents

In [22]:
TRAIN_IMAGES = glob.glob('data/train/*.png')
CLEAN_IMAGES = glob.glob('data/train_cleaned/*.png')
TEST_IMAGES = glob.glob('data/test/*.png')
In [23]:
plt.figure(figsize=(20,8))
img = cv2.imread('data/train/101.png', 0)
plt.imshow(img, cmap='gray')
print(img.shape)
(420, 540)
In [24]:
def load_image(path):
    image_list = np.zeros((len(path), 258, 540, 1))
    for i, fig in enumerate(path):
        img = image.load_img(fig, grayscale=True, target_size=(258, 540))
        x = image.img_to_array(img).astype('float32')
        x = x / 255.0
        image_list[i] = x
    
    return image_list

x_train = load_image(TRAIN_IMAGES)
y_train = load_image(CLEAN_IMAGES)
x_test = load_image(TEST_IMAGES)

print(x_train.shape, x_test.shape)
(144, 258, 540, 1) (72, 258, 540, 1)
In [1]:
#Todo: Split your dataset into train and val
In [2]:
#Todo: Visualize your train set
In [3]:
input_layer = Input(shape=(258, 540, 1))
  
#Todo: Setup encoder
#Hint: Conv2D - > MaxPooling

#Todo: Setup decoder
#Hint: Conv2D - > Upsampling

output_layer = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(decoder)

ae = Model(input_layer, output_layer)
In [4]:
#Todo: Compile and summarize your auto encoder
In [5]:
#Todo: Train your autoencoder
In [32]:
preds = ae.predict(x_test)
In [6]:
# n = 25
# preds_0 = preds[n] * 255.0
# preds_0 = preds_0.reshape(258, 540)
# x_test_0 = x_test[n] * 255.0
# x_test_0 = x_test_0.reshape(258, 540)
# plt.imshow(x_test_0, cmap='gray')
In [7]:
# plt.imshow(preds_0, cmap='gray')

Part 4: Generating New Fashion using VAEs

Understanding VAEs

"VAE"

Reset data

In [35]:
### read dataset 
train = pd.read_csv("data/fashion-mnist_train.csv")
train_x = train[list(train.columns)[1:]].values
train_x, val_x = train_test_split(train_x, test_size=0.2)

## create train and validation datasets
train_x, val_x = train_test_split(train_x, test_size=0.2)
In [36]:
## normalize and reshape
train_x = train_x/255.
val_x = val_x/255.

train_x = train_x.reshape(-1, 28, 28, 1)
val_x = val_x.reshape(-1, 28, 28, 1)

Setup Encoder Neural Network

Try different number of hidden layers, nodes?

In [37]:
import keras.backend as K
In [38]:
batch_size = 16
latent_dim = 2  # Number of latent dimension parameters

input_img = Input(shape=(784,), name="input")
x = Dense(512, activation='relu', name="intermediate_encoder")(input_img)
x = Dense(2, activation='relu', name="latent_encoder")(x)

z_mu = Dense(latent_dim)(x)
z_log_sigma = Dense(latent_dim)(x)
In [39]:
# sampling function
def sampling(args):
    z_mu, z_log_sigma = args
    epsilon = K.random_normal(shape=(K.shape(z_mu)[0], latent_dim),
                              mean=0., stddev=1.)
    return z_mu + K.exp(z_log_sigma) * epsilon

# sample vector from the latent distribution
z = Lambda(sampling)([z_mu, z_log_sigma])
In [40]:
# decoder takes the latent distribution sample as input
decoder_input = Input((2,), name="input_decoder")

x = Dense(512, activation='relu', name="intermediate_decoder", input_shape=(2,))(decoder_input)

# Expand to 784 total pixels
x = Dense(784, activation='sigmoid', name="original_decoder")(x)

# decoder model statement
decoder = Model(decoder_input, x)

# apply the decoder to the sample from the latent distribution
z_decoded = decoder(z)
In [41]:
decoder.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_decoder (InputLayer)   (None, 2)                 0         
_________________________________________________________________
intermediate_decoder (Dense) (None, 512)               1536      
_________________________________________________________________
original_decoder (Dense)     (None, 784)               402192    
=================================================================
Total params: 403,728
Trainable params: 403,728
Non-trainable params: 0
_________________________________________________________________
In [42]:
# construct a custom layer to calculate the loss
class CustomVariationalLayer(Layer):

    def vae_loss(self, x, z_decoded):
        x = K.flatten(x)
        z_decoded = K.flatten(z_decoded)
        # Reconstruction loss
        xent_loss = binary_crossentropy(x, z_decoded)
        return xent_loss

    # adds the custom loss to the class
    def call(self, inputs):
        x = inputs[0]
        z_decoded = inputs[1]
        loss = self.vae_loss(x, z_decoded)
        self.add_loss(loss, inputs=inputs)
        return x

# apply the custom loss to the input images and the decoded latent distribution sample
y = CustomVariationalLayer()([input_img, z_decoded])
In [43]:
z_decoded
Out[43]:
In [44]:
# VAE model statement
vae = Model(input_img, y)
vae.compile(optimizer='rmsprop', loss=None)
In [45]:
vae.summary()
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input (InputLayer)              (None, 784)          0                                            
__________________________________________________________________________________________________
intermediate_encoder (Dense)    (None, 512)          401920      input[0][0]                      
__________________________________________________________________________________________________
latent_encoder (Dense)          (None, 2)            1026        intermediate_encoder[0][0]       
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 2)            6           latent_encoder[0][0]             
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 2)            6           latent_encoder[0][0]             
__________________________________________________________________________________________________
lambda_1 (Lambda)               (None, 2)            0           dense_1[0][0]                    
                                                                 dense_2[0][0]                    
__________________________________________________________________________________________________
model_4 (Model)                 (None, 784)          403728      lambda_1[0][0]                   
__________________________________________________________________________________________________
custom_variational_layer_1 (Cus [(None, 784), (None, 0           input[0][0]                      
                                                                 model_4[1][0]                    
==================================================================================================
Total params: 806,686
Trainable params: 806,686
Non-trainable params: 0
__________________________________________________________________________________________________
In [46]:
train_x.shape
Out[46]:
(38400, 28, 28, 1)
In [47]:
train_x = train_x.reshape(-1, 784)
val_x = val_x.reshape(-1, 784)
In [48]:
vae.fit(x=train_x, y=None,
        shuffle=True,
        epochs=20,
        batch_size=batch_size,
        validation_data=(val_x, None))
Train on 38400 samples, validate on 9600 samples
Epoch 1/20
38400/38400 [==============================] - 10s 256us/step - loss: 0.4055 - val_loss: 0.3847
Epoch 2/20
38400/38400 [==============================] - 9s 242us/step - loss: 0.3805 - val_loss: 0.3773
Epoch 3/20
38400/38400 [==============================] - 9s 243us/step - loss: 0.3594 - val_loss: 0.3461
Epoch 4/20
38400/38400 [==============================] - 9s 242us/step - loss: 0.3448 - val_loss: 0.3408
Epoch 5/20
38400/38400 [==============================] - 9s 241us/step - loss: 0.3403 - val_loss: 0.3371
Epoch 6/20
38400/38400 [==============================] - 9s 244us/step - loss: 0.3378 - val_loss: 0.3378
Epoch 7/20
38400/38400 [==============================] - 9s 244us/step - loss: 0.3363 - val_loss: 0.3334
Epoch 8/20
38400/38400 [==============================] - 9s 241us/step - loss: 0.3351 - val_loss: 0.3337
Epoch 9/20
38400/38400 [==============================] - 9s 242us/step - loss: 0.3340 - val_loss: 0.3343
Epoch 10/20
38400/38400 [==============================] - 9s 243us/step - loss: 0.3329 - val_loss: 0.3347
Epoch 11/20
38400/38400 [==============================] - 9s 244us/step - loss: 0.3323 - val_loss: 0.3310
Epoch 12/20
38400/38400 [==============================] - 9s 243us/step - loss: 0.3318 - val_loss: 0.3324
Epoch 13/20
38400/38400 [==============================] - 9s 246us/step - loss: 0.3313 - val_loss: 0.3304
Epoch 14/20
38400/38400 [==============================] - 9s 244us/step - loss: 0.3310 - val_loss: 0.3312
Epoch 15/20
38400/38400 [==============================] - 9s 241us/step - loss: 0.3307 - val_loss: 0.3344
Epoch 16/20
38400/38400 [==============================] - 9s 244us/step - loss: 0.3305 - val_loss: 0.3299
Epoch 17/20
38400/38400 [==============================] - 9s 242us/step - loss: 0.3301 - val_loss: 0.3291
Epoch 18/20
38400/38400 [==============================] - 9s 241us/step - loss: 0.3298 - val_loss: 0.3300
Epoch 19/20
38400/38400 [==============================] - 9s 242us/step - loss: 0.3297 - val_loss: 0.3290
Epoch 20/20
38400/38400 [==============================] - 9s 244us/step - loss: 0.3294 - val_loss: 0.3296
Out[48]:
In [49]:
# Display a 2D manifold of the samples
n = 20  # figure with 20x20 samples
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))

# Construct grid of latent variable values - can change values here to generate different things
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))

# decode for each square in the grid
for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([[xi, yi]])
        z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
        
        x_decoded = decoder.predict(z_sample, batch_size=batch_size)
        
        digit = x_decoded[0].reshape(digit_size, digit_size)
        
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit

plt.figure(figsize=(20, 20))
plt.imshow(figure)
plt.show()  
In [50]:
### read dataset 
train = pd.read_csv("fashion-mnist_train.csv")
train_x = train[list(train.columns)[1:]].values
train_y = train[list(train.columns)[0]].values

train_x = train_x/255.
# train_x = train_x.reshape(-1, 28, 28, 1)

# Translate into the latent space
encoder = Model(input_img, z_mu)
x_valid_noTest_encoded = encoder.predict(train_x, batch_size=batch_size)
plt.figure(figsize=(10, 10))
plt.scatter(x_valid_noTest_encoded[:, 0], x_valid_noTest_encoded[:, 1], c=train_y, cmap='brg')
plt.colorbar()
plt.show()

Part 5: Exercise: Generating New Fashion using VAEs: Adding CNNs and KL Divergence Loss

In [8]:
batch_size = 16
latent_dim = 2  # Number of latent dimension parameters

# Hint: Encoder architecture: Input -> Conv2D*4 -> Flatten -> Dense
input_img = Input(shape=(28, 28, 1))


#Todo: Setup encoder network

shape_before_flattening = K.int_shape(x)

x = Flatten()(x)
x = Dense(32, activation='relu')(x)

# Two outputs, latent mean and (log)variance
z_mu = Dense(latent_dim)(x)
z_log_sigma = Dense(latent_dim)(x)

Set up sampling function

In [52]:
# sampling function
def sampling(args):
    z_mu, z_log_sigma = args
    epsilon = K.random_normal(shape=(K.shape(z_mu)[0], latent_dim),
                              mean=0., stddev=1.)
    return z_mu + K.exp(z_log_sigma) * epsilon

# sample vector from the latent distribution
z = Lambda(sampling)([z_mu, z_log_sigma])

Setup Decoder Neural Network

Try different number of hidden layers, nodes?

In [9]:
# decoder takes the latent distribution sample as input
decoder_input = Input(K.int_shape(z)[1:])

#Todo: Setup decoder network
#Hint Expand to 784 pixels -> reshape -> Conv2Dtranspose -> conv2D

# decoder model statement
decoder = Model(decoder_input, x)

# apply the decoder to the sample from the latent distribution
z_decoded = decoder(z)

Set up loss functions

In [54]:
# construct a custom layer to calculate the loss
class CustomVariationalLayer(Layer):

    def vae_loss(self, x, z_decoded):
        x = K.flatten(x)
        z_decoded = K.flatten(z_decoded)
        # Reconstruction loss
        xent_loss = binary_crossentropy(x, z_decoded)
        # KL divergence
        kl_loss = -5e-4 * K.mean(1 + z_log_sigma - K.square(z_mu) - K.exp(z_log_sigma), axis=-1)
        return K.mean(xent_loss + kl_loss)

    # adds the custom loss to the class
    def call(self, inputs):
        x = inputs[0]
        z_decoded = inputs[1]
        loss = self.vae_loss(x, z_decoded)
        self.add_loss(loss, inputs=inputs)
        return x

# apply the custom loss to the input images and the decoded latent distribution sample
y = CustomVariationalLayer()([input_img, z_decoded])

Train VAE

In [55]:
# VAE model statement
vae = Model(input_img, y)
vae.compile(optimizer='rmsprop', loss=None)
In [10]:
vae.summary()
In [57]:
train_x = train_x.reshape(-1, 28, 28, 1)
val_x = val_x.reshape(-1, 28, 28, 1)
In [11]:
vae.fit(x=train_x, y=None,
        shuffle=True,
        epochs=20,
        batch_size=batch_size,
        validation_data=(val_x, None))

Visualize Samples reconstructed by VAE

In [12]:
# Display a 2D manifold of the samples
n = 20  # figure with 20x20 samples
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))

# Construct grid of latent variable values - can change values here to generate different things
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))

# decode for each square in the grid
for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([[xi, yi]])
        z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
        x_decoded = decoder.predict(z_sample, batch_size=batch_size)
        digit = x_decoded[0].reshape(digit_size, digit_size)
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit

plt.figure(figsize=(20, 20))
plt.imshow(figure)
plt.show()  

Exercise: Visualize latent space

In [60]:
train = pd.read_csv("data/fashion-mnist_train.csv")
In [61]:
### read dataset 
train = pd.read_csv("data/fashion-mnist_train.csv")
train_x = train[list(train.columns)[1:]].values
train_y = train[list(train.columns)[0]].values

train_x = train_x/255.
train_x = train_x.reshape(-1, 28, 28, 1)
In [13]:
# Translate into the latent space
encoder = Model(input_img, z_mu)
x_valid_noTest_encoded = encoder.predict(train_x, batch_size=batch_size)
plt.figure(figsize=(10, 10))
plt.scatter(x_valid_noTest_encoded[:, 0], x_valid_noTest_encoded[:, 1], c=train_y, cmap='brg')
plt.colorbar()
plt.show()