Commit 49b12917 authored by Kaan Güney Keklikçi

implemented inverse autoregressive flow, added optimizer test for MAF

parent dd081af7
@@ -10,8 +10,16 @@ plt.style.use('seaborn')
 tfd = tfp.distributions
 tfb = tfp.bijectors
-class MAF:
-    def __init__(self, dtype, tf_version, batch_size, params, hidden_units, base_dist, dims):
+class MAF(object):
+    def __init__(self, dtype, tf_version,
+                 batch_size, params, hidden_units,
+                 base_dist, dims,
+                 activation,
+                 conditional, hidden_degrees,
+                 conditional_event_shape,
+                 conditional_input_layers,
+                 event_shape):
         self.tf_version = tf_version
         self.dtype = dtype
         self.base_dist = base_dist
@@ -19,6 +27,12 @@ class MAF:
         self.params = params
         self.hidden_units = hidden_units
         self.batch_size = batch_size
+        self.activation = activation
+        self.conditional = conditional
+        self.conditional_event_shape = conditional_event_shape
+        self.hidden_degrees = hidden_degrees
+        self.conditional_input_layers = conditional_input_layers
+        self.event_shape = event_shape
     def get_tf_version(self):
         return self.tf_version
@@ -39,8 +53,16 @@ class MAF:
         samples = data_iterator.get_next()
         return samples
-    def get_shift_scale_func(self, data):
-        func = tfb.AutoregressiveNetwork(params=self.params, hidden_units=self.hidden_units)
+    def get_shift_scale_func(self):
+        func = tfb.AutoregressiveNetwork(params=self.params,
+                                         hidden_units=self.hidden_units,
+                                         activation=self.activation,
+                                         conditional=self.conditional,
+                                         conditional_event_shape=self.conditional_event_shape,
+                                         event_shape=self.event_shape,
+                                         conditional_input_layers=self.conditional_input_layers,
+                                         hidden_degrees=self.hidden_degrees)
         return func
     def make_maf(self, data):
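For readers skimming the new keyword arguments: a minimal sketch (illustrative shapes and values, not taken from this commit) of building and calling a conditional tfb.AutoregressiveNetwork. params=2 emits a shift and a log-scale per dimension; conditional_input_layers='first_layer' injects the context only into the first hidden layer; hidden_degrees='random' draws the hidden units' autoregressive degrees at random.

import tensorflow_probability as tfp
tfb = tfp.bijectors

# Hypothetical shapes: 3-dimensional events conditioned on a 3-dimensional context.
made = tfb.AutoregressiveNetwork(params=2,
                                 hidden_units=[64, 64],
                                 activation='relu',
                                 event_shape=(3,),
                                 conditional=True,
                                 conditional_event_shape=(3,),
                                 conditional_input_layers='first_layer',
                                 hidden_degrees='random')
# Calling the network: for x of shape [batch, 3] and context of shape [batch, 3],
# made(x, conditional_input=context) returns shape [batch, 3, 2]
# (a shift and a log-scale per event dimension).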
@@ -51,3 +73,13 @@ class MAF:
         maf = tfd.TransformedDistribution(tfd.Sample(distribution, sample_shape), bijector)
         return maf
+class IAF(MAF):
+    def make_maf(self, data):
+        distribution = self.base_dist
+        sample_shape = self.get_dims(data)
+        shift_scale_function = self.get_shift_scale_func()
+        bijector = tfb.Invert(tfb.MaskedAutoregressiveFlow(shift_scale_function))
+        maf = tfd.TransformedDistribution(tfd.Sample(distribution, sample_shape), bijector)
+        return maf
\ No newline at end of file
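The IAF subclass uses the standard duality: wrapping the same masked autoregressive bijector in tfb.Invert swaps the cost profile, so MAF gets one-pass log_prob and D-pass sampling while IAF gets the reverse. A minimal standalone sketch of that relationship (eager-mode TFP assumed for brevity; this repo runs in graph mode):

import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors

made = tfb.AutoregressiveNetwork(params=2, hidden_units=[32, 32])
ar_bijector = tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=made)
base = tfd.Sample(tfd.Normal(loc=0., scale=1.), sample_shape=[2])

maf = tfd.TransformedDistribution(base, ar_bijector)              # fast log_prob, slow sample
iaf = tfd.TransformedDistribution(base, tfb.Invert(ar_bijector))  # fast sample, slow log_prob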
@@ -8,6 +8,7 @@ tf.compat.v1.disable_eager_execution()
 import tensorflow_probability as tfp
 import matplotlib.pyplot as plt
 plt.style.use('seaborn')
+from data_loader import load_data
 from data_preprocesser import preprocess_data
@@ -28,7 +29,6 @@ def train(session, loss, optimizer, steps=int(1e5)):
             print('Iteration {iteration}: {loss}'.format(iteration=i, loss=loss_per_iteration))
     return recorded_losses
 def plot_results(recorded_losses):
     """ plot loss """
@@ -81,10 +81,23 @@ def main():
     base_dist = tfp.distributions.Normal(loc=0., scale=1.)
     dims = data.shape[1]
     learning_rate = 1e-4
+    activation = 'relu'
+    hidden_degrees = 'random'
+    conditional = True
+    conditional_event_shape = (dims,)
+    event_shape = conditional_event_shape
+    conditional_input_layers = 'first_layer'
     """ initialize samples """
-    maf = MAF(dtype, tf_version, batch_size, params, hidden_units, base_dist, dims)
+    maf = MAF(dtype, tf_version, batch_size,
+              params, hidden_units, base_dist, dims,
+              activation,
+              conditional, hidden_degrees,
+              conditional_event_shape,
+              conditional_input_layers,
+              event_shape)
     dims = maf.get_dims(data)
     samples = maf.create_tensor(data)
@@ -99,7 +112,7 @@ def main():
     """ initialize loss and optimizer """
-    loss = -tf.reduce_mean(maf.log_prob(samples))
+    loss = -tf.reduce_mean(maf.log_prob(samples, bijector_kwargs={'conditional_input': samples}))
     optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss)
     session = tf.compat.v1.Session()
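The new loss threads the conditioning tensor through TransformedDistribution.log_prob via bijector_kwargs; MaskedAutoregressiveFlow forwards it to the MADE network as conditional_input. Note this script conditions the flow on the same batch it scores. A minimal sketch of the mechanism with a separate, hypothetical context tensor (eager mode for brevity):

import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors

made = tfb.AutoregressiveNetwork(params=2, hidden_units=[32],
                                 event_shape=(2,),
                                 conditional=True,
                                 conditional_event_shape=(2,))
dist = tfd.TransformedDistribution(
    tfd.Sample(tfd.Normal(0., 1.), sample_shape=[2]),
    tfb.MaskedAutoregressiveFlow(made))

x = tf.random.normal([8, 2])        # data batch
context = tf.random.normal([8, 2])  # hypothetical conditioning batch
# bijector_kwargs is routed to the MADE network on every bijector call.
nll = -tf.reduce_mean(dist.log_prob(x, bijector_kwargs={'conditional_input': context}))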
@@ -92,10 +92,23 @@ def main():
     dims = data.shape[1]
     learning_rate = 1e-4
     steps = 1e4
+    activation = 'relu'
+    hidden_degrees = 'random'
+    conditional = True
+    conditional_event_shape = (dims,)
+    event_shape = conditional_event_shape
+    conditional_input_layers = 'first_layer'
     """ initialize samples """
-    maf = MAF(dtype, tf_version, batch_size, params, hidden_units, base_dist, dims)
+    maf = MAF(dtype, tf_version, batch_size,
+              params, hidden_units, base_dist, dims,
+              activation,
+              conditional, hidden_degrees,
+              conditional_event_shape,
+              conditional_input_layers,
+              event_shape)
     dims = maf.get_dims(data)
     samples = maf.create_tensor(data)
@@ -110,7 +123,7 @@ def main():
     """ initialize loss and optimizer """
-    loss = -tf.reduce_mean(maf.log_prob(samples))
+    loss = -tf.reduce_mean(maf.log_prob(samples, bijector_kwargs={'conditional_input': samples}))
     optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss)
     experiment = Experiment(optimizer, learning_rate, loss, steps)
@@ -138,3 +151,6 @@ def main():
 if __name__ == "__main__":
     main()
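Putting the pieces together, a hypothetical graph-mode driver for these scripts (train, plot_results, loss, and optimizer are the names defined above; the exact wiring of the elided lines is an assumption):

# Hypothetical driver wiring, under the repo's tf.compat.v1 graph-mode setup.
session = tf.compat.v1.Session()
session.run(tf.compat.v1.global_variables_initializer())
recorded_losses = train(session, loss, optimizer, steps=int(1e4))
plot_results(recorded_losses)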