Commit c8d0b24d authored by Kaan Güney Keklikçi

modular maf implementation, Python=3.7.10 scripts

parent 154e756a
# data_loader.py
import pandas as pd
from pathlib import Path
import xlrd


class load_data:
    """Load an Excel workbook from a local directory."""

    def __init__(self, filename, directory):
        self.filename = filename
        self.directory = directory

    def create_directory(self, directory):
        # create the data directory if it does not already exist
        Path(directory).mkdir(parents=True, exist_ok=True)

    def read_data(self, directory, filename):
        # open the workbook with a legacy encoding, then parse it with pandas
        data_dir = self.directory + self.filename
        wb = xlrd.open_workbook(data_dir, encoding_override='iso-8859-1')
        return pd.read_excel(wb)
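For reference, a minimal usage sketch of the loader; the filename and directory here are the ones used by the training script below:

# minimal usage sketch, assuming 'prostate.xls' exists in the given directory
from data_loader import load_data

loader = load_data('prostate.xls', '/Users/kaanguney.keklikci/Data/')
loader.create_directory('/Users/kaanguney.keklikci/Data/')
data = loader.read_data('/Users/kaanguney.keklikci/Data/', 'prostate.xls')
print(data.shape)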
# data_preprocesser.py
import numpy as np
import pandas as pd


class preprocess_data:
    """Clean, encode, and scale a pandas DataFrame."""

    def __init__(self, scaler, fillna_vals, dropna_vals, drop_vals):
        self.scaler = scaler            # e.g. an sklearn StandardScaler
        self.fillna_vals = fillna_vals  # features imputed with their mean
        self.dropna_vals = dropna_vals  # rows missing these features are dropped
        self.drop_vals = drop_vals      # features removed entirely

    def dropna_features(self, data):
        # drop rows that are missing any of the required features
        data = data.dropna(subset=self.dropna_vals)
        return data

    def impute(self, data):
        # replace missing values with the feature mean (pandas skips NaN here)
        for feature in self.fillna_vals:
            data[feature] = data[feature].fillna(value=np.mean(data[feature]))
        return data

    def drop_features(self, data):
        # remove identifier-style columns and reindex
        data.drop(self.drop_vals, axis=1, inplace=True)
        data.reset_index(drop=True, inplace=True)
        return data

    def encode_categorical(self, data):
        # one-hot encode all categorical columns
        data = pd.get_dummies(data)
        return data

    def scale(self, data):
        # fit the scaler and return a DataFrame with the original column names
        columns = data.columns
        data = self.scaler.fit_transform(data)
        data = pd.DataFrame(data, columns=columns)
        return data
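A minimal sketch of the intended call order, reusing the scaler choice and column names from the training script below; StandardScaler is one choice of scaler, not the only one:

# 'data' is a pandas DataFrame, e.g. the one returned by load_data above
from sklearn.preprocessing import StandardScaler
from data_preprocesser import preprocess_data

preprocesser = preprocess_data(StandardScaler(),
                               fillna_vals=['sz', 'sg', 'wt'],
                               dropna_vals=['ekg', 'age'],
                               drop_vals=['patno', 'sdate'])
data = preprocesser.dropna_features(data)   # drop rows missing required features
data = preprocesser.impute(data)            # mean-impute the rest
data = preprocesser.drop_features(data)     # remove identifiers
data = preprocesser.encode_categorical(data)
data = preprocesser.scale(data)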
# maf.py
import tensorflow as tf
import tensorflow_probability as tfp
tf.compat.v1.disable_eager_execution()
import matplotlib.pyplot as plt
plt.style.use('seaborn')

tfd = tfp.distributions
tfb = tfp.bijectors


class MAF:
    """Masked autoregressive flow built from TFP building blocks."""

    def __init__(self, dtype, tf_version, batch_size, params, hidden_units, base_dist, dims):
        self.tf_version = tf_version
        self.dtype = dtype
        self.base_dist = base_dist      # base distribution transformed by the flow
        self.dims = dims                # event dimensionality
        self.params = params            # outputs per dimension (2: shift and log-scale)
        self.hidden_units = hidden_units
        self.batch_size = batch_size

    def get_tf_version(self):
        return self.tf_version

    def get_session(self):
        return tf.compat.v1.Session()

    def get_dims(self, data):
        return data.shape[1]

    def create_tensor(self, data):
        # wrap the data in a repeating, shuffled, prefetched, batched pipeline
        dataset = tf.data.Dataset.from_tensor_slices(data.astype(self.dtype))
        dataset = dataset.repeat()
        dataset = dataset.shuffle(buffer_size=data.shape[0])
        dataset = dataset.prefetch(2 * self.batch_size)
        dataset = dataset.batch(self.batch_size)
        data_iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
        samples = data_iterator.get_next()
        return samples

    def get_shift_scale_func(self, data):
        # MADE network that outputs the shift and log-scale parameters
        func = tfb.AutoregressiveNetwork(params=self.params, hidden_units=self.hidden_units)
        return func

    def make_maf(self, data):
        # transform the base distribution with a masked autoregressive bijector
        distribution = self.base_dist
        sample_shape = self.get_dims(data)
        shift_scale_function = self.get_shift_scale_func(data)
        bijector = tfb.MaskedAutoregressiveFlow(shift_scale_function)
        maf = tfd.TransformedDistribution(tfd.Sample(distribution, sample_shape), bijector)
        return maf
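For reference, a minimal graph-mode sketch of building the flow and evaluating log-densities; the 2-D toy data and shapes here are assumptions for illustration, not part of the commit:

# build the flow on hypothetical toy data and evaluate per-example log-densities
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from maf import MAF  # importing maf disables eager execution

toy = np.random.randn(100, 2).astype(np.float32)  # hypothetical 2-D dataset
maf = MAF(dtype=np.float32, tf_version=tf.__version__, batch_size=32,
          params=2, hidden_units=[512, 512],
          base_dist=tfp.distributions.Normal(loc=0., scale=1.), dims=2)
samples = maf.create_tensor(toy)
flow = maf.make_maf(toy)
log_prob = flow.log_prob(samples)  # change-of-variables log-density per example
with tf.compat.v1.Session() as session:
    session.run(tf.compat.v1.global_variables_initializer())
    print(session.run(log_prob).shape)  # (32,)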
# training script
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging
import numpy as np
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import tensorflow_probability as tfp
import matplotlib.pyplot as plt
from data_loader import load_data
from data_preprocesser import preprocess_data
from maf import MAF


def train(session, loss, optimizer, steps=int(1e5)):
    """ optimize for all dimensions """
    recorded_steps = []
    recorded_losses = []
    for i in range(steps):
        _, loss_per_iteration = session.run([optimizer, loss])
        if i % 100 == 0:
            recorded_steps.append(i)
            recorded_losses.append(loss_per_iteration)
        if i % int(1e4) == 0:
            print('Iteration {iteration}: {loss}'.format(iteration=i, loss=loss_per_iteration))
    return recorded_losses


def plot_results(recorded_losses):
    """ plot loss with a linear trend line """
    print('Displaying results...')
    fig = plt.figure(figsize=(10, 5))
    x = np.arange(len(recorded_losses))
    y = recorded_losses
    m, b = np.polyfit(x, y, 1)
    plt.scatter(x, y, s=10, alpha=0.3)
    plt.plot(x, m * x + b, c='r')
    plt.title('Loss per 100 iterations')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')
    plt.tight_layout()
    plt.show()


def main():
    """ load data """
    filename = 'prostate.xls'
    directory = '/Users/kaanguney.keklikci/Data/'
    loader = load_data(filename, directory)
    loader.create_directory(directory)
    data = loader.read_data(directory, filename)
    print('Data successfully loaded...\n')

    """ preprocess data """
    fillna_vals = ['sz', 'sg', 'wt']
    dropna_vals = ['ekg', 'age']
    drop_vals = ['patno', 'sdate']
    preprocesser = preprocess_data(StandardScaler(), fillna_vals, dropna_vals, drop_vals)
    data = preprocesser.dropna_features(data)
    data = preprocesser.impute(data)
    data = preprocesser.drop_features(data)
    data = preprocesser.encode_categorical(data)
    data = preprocesser.scale(data)
    print('Data successfully preprocessed...\n')

    """ set MAF parameters """
    batch_size = 32
    dtype = np.float32
    tf_version = tf.__version__
    params = 2  # shift and log-scale per dimension
    hidden_units = [512, 512]
    base_dist = tfp.distributions.Normal(loc=0., scale=1.)
    dims = data.shape[1]
    learning_rate = 1e-4

    """ initialize samples """
    maf = MAF(dtype, tf_version, batch_size, params, hidden_units, base_dist, dims)
    dims = maf.get_dims(data)
    samples = maf.create_tensor(data)
    print(f'TensorFlow version: {maf.tf_version}')
    print(f'Number of dimensions: {maf.dims}')
    print(f'Learning rate: {learning_rate}\n')

    """ initialize MAF """
    maf = maf.make_maf(data)
    print('Successfully created model...\n')

    """ initialize loss and optimizer """
    # negative log-likelihood of the batch under the flow
    loss = -tf.reduce_mean(maf.log_prob(samples))
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss)
    session = tf.compat.v1.Session()
    tf.compat.v1.set_random_seed(42)
    session.run(tf.compat.v1.global_variables_initializer())
    print('Optimizer and loss successfully defined...\n')

    """ start training """
    recorded_losses = train(session, loss, optimizer)
    print('Training finished...\n')

    """ display results """
    plot_results(recorded_losses)


if __name__ == "__main__":
    main()
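A possible follow-up, not part of this commit: once trained, the same TransformedDistribution can generate synthetic rows by inverting the flow; a minimal sketch, assuming the maf and session variables at the end of main():

# hypothetical extension to main(): draw synthetic rows from the trained flow
new_samples = maf.sample(10)          # graph op; maf is the TransformedDistribution
generated = session.run(new_samples)  # shape (10, dims), in the scaled feature space
print(generated.shape)

MAF makes density evaluation cheap (one network pass per batch) while sampling is sequential across dimensions, which is why this script trains by maximum likelihood rather than by generating data.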