Update to TF1.1 TL1.4.5
zsdonghao committed May 27, 2017
1 parent 27d83c3 commit 8ebb42f
Showing 28 changed files with 437 additions and 332 deletions.
34 changes: 7 additions & 27 deletions main.py
@@ -43,7 +43,6 @@ def main(_):
tl.files.exists_or_mkdir(FLAGS.sample_dir)

z_dim = 100

with tf.device("/gpu:0"):
##========================= DEFINE MODEL ===========================##
z = tf.placeholder(tf.float32, [FLAGS.batch_size, z_dim], name='z_noise')
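The rest of the model-definition block is collapsed in this diff, but the training loop below refers to net_g, net_d, net_g2, real_images, d_loss, g_loss, d_optim and g_optim. As a rough guide only, here is a minimal sketch of how those names are typically wired in this DCGAN setup, using the generator_simplified_api / discriminator_simplified_api functions from model.py and tl.cost.sigmoid_cross_entropy; FLAGS.learning_rate and FLAGS.beta1 are assumed flag names and the exact code in the repository may differ.

# Sketch only: approximate wiring of the collapsed model-definition block.
real_images = tf.placeholder(tf.float32,
        [FLAGS.batch_size, FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim], name='real_images')

# networks: G(z), D(G(z)), D(x); net_g2 reuses G in inference mode for sampling
net_g, _ = generator_simplified_api(z, is_train=True, reuse=False)
net_d, d_logits_fake = discriminator_simplified_api(net_g.outputs, is_train=True, reuse=False)
_, d_logits_real = discriminator_simplified_api(real_images, is_train=True, reuse=True)
net_g2, _ = generator_simplified_api(z, is_train=False, reuse=True)

# standard DCGAN losses built from sigmoid cross-entropy on the discriminator logits
d_loss = tl.cost.sigmoid_cross_entropy(d_logits_real, tf.ones_like(d_logits_real), name='d_real') \
       + tl.cost.sigmoid_cross_entropy(d_logits_fake, tf.zeros_like(d_logits_fake), name='d_fake')
g_loss = tl.cost.sigmoid_cross_entropy(d_logits_fake, tf.ones_like(d_logits_fake), name='g_fake')

# separate Adam optimizers over each sub-network's variables
g_vars = tl.layers.get_variables_with_name('generator', train_only=True, printable=False)
d_vars = tl.layers.get_variables_with_name('discriminator', train_only=True, printable=False)
d_optim = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1).minimize(d_loss, var_list=d_vars)
g_optim = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1).minimize(g_loss, var_list=g_vars)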
@@ -94,15 +93,14 @@ def main(_):
net_d_name = os.path.join(save_dir, 'net_d.npz')

data_files = glob(os.path.join("./data", FLAGS.dataset, "*.jpg"))
# sample_seed = np.random.uniform(low=-1, high=1, size=(FLAGS.sample_size, z_dim)).astype(np.float32)
sample_seed = np.random.normal(loc=0.0, scale=1.0, size=(FLAGS.sample_size, z_dim)).astype(np.float32)

sample_seed = np.random.normal(loc=0.0, scale=1.0, size=(FLAGS.sample_size, z_dim)).astype(np.float32)# sample_seed = np.random.uniform(low=-1, high=1, size=(FLAGS.sample_size, z_dim)).astype(np.float32)

##========================= TRAIN MODELS ================================##
iter_counter = 0
for epoch in range(FLAGS.epoch):
## shuffle data
shuffle(data_files)
print("[*] Dataset shuffled!")

## update sample files based on shuffled data
sample_files = data_files[0:FLAGS.sample_size]
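The change just above swaps the fixed evaluation noise from a uniform distribution on [-1, 1] to a standard normal; the same sample_seed is reused at every sample_step, so the saved image grids stay comparable across training. Note that batch_z further below is sized with FLAGS.sample_size, which matches the z placeholder only when sample_size equals batch_size. A minimal sketch of the two samplers, assuming sample_size=64 and z_dim=100:

import numpy as np

z_uniform = np.random.uniform(low=-1, high=1, size=(64, 100)).astype(np.float32)    # old: uniform on [-1, 1]
z_normal = np.random.normal(loc=0.0, scale=1.0, size=(64, 100)).astype(np.float32)  # new: standard normal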
@@ -119,46 +117,28 @@ def main(_):
# more image augmentation functions in http://tensorlayer.readthedocs.io/en/latest/modules/prepro.html
batch = [get_image(batch_file, FLAGS.image_size, is_crop=FLAGS.is_crop, resize_w=FLAGS.output_size, is_grayscale = 0) for batch_file in batch_files]
batch_images = np.array(batch).astype(np.float32)
# batch_z = np.random.uniform(low=-1, high=1, size=(FLAGS.batch_size, z_dim)).astype(np.float32)
batch_z = np.random.normal(loc=0.0, scale=1.0, size=(FLAGS.sample_size, z_dim)).astype(np.float32)
batch_z = np.random.normal(loc=0.0, scale=1.0, size=(FLAGS.sample_size, z_dim)).astype(np.float32) # batch_z = np.random.uniform(low=-1, high=1, size=(FLAGS.batch_size, z_dim)).astype(np.float32)
start_time = time.time()
# updates the discriminator
errD, _ = sess.run([d_loss, d_optim], feed_dict={z: batch_z, real_images: batch_images })
# updates the generator, run generator twice to make sure that d_loss does not go to zero (difference from paper)
for _ in range(2):
errG, _ = sess.run([g_loss, g_optim], feed_dict={z: batch_z})
print("Epoch: [%2d/%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f, g_loss: %.8f" \
% (epoch, FLAGS.epoch, idx, batch_idxs,
time.time() - start_time, errD, errG))
sys.stdout.flush()
% (epoch, FLAGS.epoch, idx, batch_idxs, time.time() - start_time, errD, errG))

iter_counter += 1
if np.mod(iter_counter, FLAGS.sample_step) == 0:
# generate and visualize generated images
img, errD, errG = sess.run([net_g2.outputs, d_loss, g_loss], feed_dict={z : sample_seed, real_images: sample_images})
save_images(img, [8, 8],
'./{}/train_{:02d}_{:04d}.png'.format(FLAGS.sample_dir, epoch, idx))
tl.visualize.save_images(img, [8, 8], './{}/train_{:02d}_{:04d}.png'.format(FLAGS.sample_dir, epoch, idx))
print("[Sample] d_loss: %.8f, g_loss: %.8f" % (errD, errG))
sys.stdout.flush()

if np.mod(iter_counter, FLAGS.save_step) == 0:
# save current network parameters
print("[*] Saving checkpoints...")
img, errD, errG = sess.run([net_g2.outputs, d_loss, g_loss], feed_dict={z : sample_seed, real_images: sample_images})
model_dir = "%s_%s_%s" % (FLAGS.dataset, FLAGS.batch_size, FLAGS.output_size)
save_dir = os.path.join(FLAGS.checkpoint_dir, model_dir)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# the latest version location
net_g_name = os.path.join(save_dir, 'net_g.npz')
net_d_name = os.path.join(save_dir, 'net_d.npz')
# # this version is for future re-check and visualization analysis
# net_g_iter_name = os.path.join(save_dir, 'net_g_%d.npz' % iter_counter)
# net_d_iter_name = os.path.join(save_dir, 'net_d_%d.npz' % iter_counter)
# tl.files.save_npz(net_g.all_params, name=net_g_name, sess=sess)
# tl.files.save_npz(net_d.all_params, name=net_d_name, sess=sess)
# tl.files.save_npz(net_g.all_params, name=net_g_iter_name, sess=sess)
# tl.files.save_npz(net_d.all_params, name=net_d_iter_name, sess=sess)
tl.files.save_npz(net_g.all_params, name=net_g_name, sess=sess)
tl.files.save_npz(net_d.all_params, name=net_d_name, sess=sess)
print("[*] Saving checkpoints SUCCESS!")
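The save step above writes the current generator and discriminator parameters with tl.files.save_npz. A short sketch of how those checkpoints could later be restored into freshly built networks, assuming the TL 1.4 file API (tl.files.load_npz and tl.files.assign_params) and the same save_dir, net_g, net_d and sess as above:

# rebuild the graph first, then load the saved parameter lists and assign them in the session
net_g_params = tl.files.load_npz(name=os.path.join(save_dir, 'net_g.npz'))
net_d_params = tl.files.load_npz(name=os.path.join(save_dir, 'net_d.npz'))
tl.files.assign_params(sess, net_g_params, net_g)
tl.files.assign_params(sess, net_d_params, net_d)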

if __name__ == '__main__':
5 changes: 0 additions & 5 deletions model.py
@@ -12,10 +12,8 @@ def generator_simplified_api(inputs, is_train=True, reuse=False):
gf_dim = 64 # Dimension of gen filters in first conv layer. [64]
c_dim = FLAGS.c_dim # n_color 3
batch_size = FLAGS.batch_size # 64

w_init = tf.random_normal_initializer(stddev=0.02)
gamma_init = tf.random_normal_initializer(1., 0.02)

with tf.variable_scope("generator", reuse=reuse):
tl.layers.set_name_reuse(reuse)

@@ -47,15 +45,12 @@ def generator_simplified_api(inputs, is_train=True, reuse=False):
net_h4.outputs = tf.nn.tanh(net_h4.outputs)
return net_h4, logits


def discriminator_simplified_api(inputs, is_train=True, reuse=False):
df_dim = 64 # Dimension of discrim filters in first conv layer. [64]
c_dim = FLAGS.c_dim # n_color 3
batch_size = FLAGS.batch_size # 64

w_init = tf.random_normal_initializer(stddev=0.02)
gamma_init = tf.random_normal_initializer(1., 0.02)

with tf.variable_scope("discriminator", reuse=reuse):
tl.layers.set_name_reuse(reuse)
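The layer stacks inside both model functions are collapsed in this diff. For orientation only, here is a sketch of how the discriminator body is typically assembled with the TL 1.4 simplified layer API and the w_init / gamma_init initializers above; the filter counts, leaky-ReLU slope, and layer names are illustrative rather than copied from the repository.

net_in = tl.layers.InputLayer(inputs, name='d/in')
net_h0 = tl.layers.Conv2d(net_in, df_dim, (5, 5), (2, 2), act=lambda x: tf.maximum(x, 0.2 * x),
                          padding='SAME', W_init=w_init, name='d/h0/conv2d')
net_h1 = tl.layers.Conv2d(net_h0, df_dim * 2, (5, 5), (2, 2), act=None,
                          padding='SAME', W_init=w_init, name='d/h1/conv2d')
net_h1 = tl.layers.BatchNormLayer(net_h1, act=lambda x: tf.maximum(x, 0.2 * x),
                                  is_train=is_train, gamma_init=gamma_init, name='d/h1/batch_norm')
# ... further Conv2d/BatchNormLayer blocks, then flatten and a 1-unit dense head
net_h4 = tl.layers.FlattenLayer(net_h1, name='d/h4/flatten')
net_h4 = tl.layers.DenseLayer(net_h4, n_units=1, act=tf.identity, W_init=w_init, name='d/h4/lin_sigmoid')
logits = net_h4.outputs
net_h4.outputs = tf.nn.sigmoid(net_h4.outputs)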

2 changes: 1 addition & 1 deletion tensorlayer/__init__.py
100755 → 100644
@@ -25,7 +25,7 @@
from . import rein


__version__ = "1.4.2"
__version__ = "1.4.5"

global_flag = {}
global_dict = {}
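This commit targets TensorFlow 1.1 with the bundled TensorLayer bumped to 1.4.5, as the version string change above shows. A small sketch for confirming the environment matches:

import tensorflow as tf
import tensorlayer as tl

print(tf.__version__)  # expected to start with "1.1"
print(tl.__version__)  # expected "1.4.5"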
Binary file removed tensorlayer/__pycache__/__init__.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/activation.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/cost.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/files.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/iterate.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/layers.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/nlp.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/ops.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/prepro.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/rein.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/utils.cpython-35.pyc
Binary file removed tensorlayer/__pycache__/visualize.cpython-35.pyc
Empty file modified tensorlayer/activation.py
100755 → 100644
16 changes: 8 additions & 8 deletions tensorlayer/cost.py
100755 → 100644
@@ -32,11 +32,11 @@ def cross_entropy(output, target, name=None):
- About cross-entropy: `wiki <https://en.wikipedia.org/wiki/Cross_entropy>`_.\n
- The code is borrowed from: `here <https://en.wikipedia.org/wiki/Cross_entropy>`_.
"""
# try: # old
# return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, targets=target))
# except: # TF 1.0
# assert name is not None, "Please give a unique name to tl.cost.cross_entropy for TF1.0+"
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output, name=name))
try: # old
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=output, targets=target))
except: # TF 1.0
assert name is not None, "Please give a unique name to tl.cost.cross_entropy for TF1.0+"
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output, name=name))
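The restored try/except above keeps cross_entropy compatible across TF versions: pre-1.0 releases used the targets= keyword, while TF 1.0+ uses labels= and TL then asks for an explicit op name. A minimal usage sketch under TF 1.0+, assuming network.outputs holds unscaled logits and y_ holds integer class labels (both names are illustrative):

# a unique name is required by tl.cost.cross_entropy on TF 1.0+
cost = tl.cost.cross_entropy(network.outputs, y_, name='train_xentropy')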

def sigmoid_cross_entropy(output, target, name=None):
"""It is a sigmoid cross-entropy operation, see ``tf.nn.sigmoid_cross_entropy_with_logits``.
@@ -72,8 +72,8 @@ def binary_cross_entropy(output, target, epsilon=1e-8, name='bce_loss'):
# output = ops.convert_to_tensor(output, name="preds")
# target = ops.convert_to_tensor(targets, name="target")
with tf.name_scope(name):
return tf.reduce_mean(-(target * tf.log(output + epsilon) +
(1. - target) * tf.log(1. - output + epsilon)))
return tf.reduce_mean(tf.reduce_sum(-(target * tf.log(output + epsilon) +
(1. - target) * tf.log(1. - output + epsilon)), axis=1))
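The binary_cross_entropy change above alters the reduction: instead of averaging over every element, the per-element terms are now summed across each sample (axis=1) and only then averaged over the batch, so the reported loss scales with the sample dimensionality. A small NumPy sketch of the two reductions, assuming a batch of flattened outputs and targets:

import numpy as np

target = np.array([[1., 0., 1.], [0., 1., 1.]])
output = np.array([[0.9, 0.2, 0.8], [0.1, 0.7, 0.6]])
eps = 1e-8

bce = -(target * np.log(output + eps) + (1. - target) * np.log(1. - output + eps))
old_loss = bce.mean()              # old: mean over all elements
new_loss = bce.sum(axis=1).mean()  # new: sum per sample, then mean over the batch
# here new_loss == old_loss * 3, since each sample has 3 dimensions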


def mean_squared_error(output, target, is_mean=False):
@@ -223,7 +223,7 @@ def cross_entropy_seq(logits, target_seqs, batch_size=None):#, batch_size=1, num
>>> see PTB tutorial for more details
>>> input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
>>> targets = tf.placeholder(tf.int32, [batch_size, num_steps])
>>> cost = tf.cost.cross_entropy_seq(network.outputs, targets)
>>> cost = tl.cost.cross_entropy_seq(network.outputs, targets)
"""
try: # TF 1.0
sequence_loss_by_example_fn = tf.contrib.legacy_seq2seq.sequence_loss_by_example
(The remaining changed files are not shown in this view.)
