I'm trying to train the code on my own data, which has shape [82 x 41 x 1]. I adapted the size of the fully connected layer in the discriminator accordingly and fixed the data-reading part to cope with single-channel data. However, the program still crashes with the above error message.
It crashes either on the line that calls for the gradient computation in def build_model(self):
# Gradient of the completion loss w.r.t. the latent vector z — used to
# optimize z directly (inpainting-style completion).
# NOTE(review): this is the line reported to crash; a shape mismatch built
# earlier in the graph (e.g. the discriminator's hard-coded flatten size)
# would surface here when the gradient graph is constructed.
self.grad_complete_loss = tf.gradients(self.complete_loss, self.z)
# Runs the initializer op; requires a default session to be active.
tf.global_variables_initializer().run()
def discriminator(self, image, reuse=False, output_size = 9216):
    """Build the DCGAN discriminator graph.

    Four stride-2 SAME-padded convolutions (via the project's ``conv2d``
    helper) with leaky-ReLU activations and batch norm, followed by a
    linear layer producing a single real/fake logit.

    Args:
        image: input image batch, NHWC layout — TODO confirm against conv2d.
        reuse: if True, reuse the variables of a previously built copy.
        output_size: kept only for backward compatibility; the flattened
            feature size is now derived from the conv output itself, so a
            stale value no longer crashes non-64x64 inputs (e.g. 82x41x1).

    Returns:
        A tuple ``(sigmoid(logits), logits)``, each of shape [batch, 1].
    """
    if reuse:
        tf.get_variable_scope().reuse_variables()

    h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv'))
    h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv')))
    h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv')))
    h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, name='d_h3_conv')))

    # Derive the flatten size from h3's static shape instead of trusting
    # the hard-coded output_size: the original default (9216) did not even
    # match its own comment ("4x4x512 = 8192"), and any mismatch breaks
    # tf.reshape / linear for non-64x64 inputs.
    # NOTE(review): assumes the spatial dims are statically known (they
    # are when the placeholder has a fixed shape) — verify against caller.
    dims = h3.get_shape().as_list()
    flat_dim = dims[1] * dims[2] * dims[3]
    h4 = linear(tf.reshape(h3, [-1, flat_dim]), 1, 'd_h3_lin')
    return tf.nn.sigmoid(h4), h4
def generator(self, z, c_dim, output_size = (64,64)):
    """Build the DCGAN generator graph.

    Projects z through a linear layer, reshapes it to a small spatial map,
    then upsamples through four stride-2 transposed convolutions to the
    target resolution.

    Args:
        z: latent noise tensor, shape [batch_size, z_dim].
        c_dim: number of output channels (1 for grayscale, 3 for RGB).
        output_size: (height, width) of the generated image. The default
            (64, 64) reproduces the original hard-coded 4->8->16->32->64
            pipeline exactly.

    Returns:
        tanh-activated image tensor of shape
        [batch_size, output_size[0], output_size[1], c_dim].
    """
    # Work backwards from the target size through the four stride-2
    # deconvs. Ceil-division ((s + 1) // 2) matches SAME-padded stride-2
    # downsampling, so odd sizes such as 82x41 stay consistent with the
    # discriminator's conv stack. For 64x64 this yields 32/16/8/4 — the
    # exact constants the original hard-coded.
    s_h, s_w = output_size
    s_h2, s_w2 = (s_h + 1) // 2, (s_w + 1) // 2
    s_h4, s_w4 = (s_h2 + 1) // 2, (s_w2 + 1) // 2
    s_h8, s_w8 = (s_h4 + 1) // 2, (s_w4 + 1) // 2
    s_h16, s_w16 = (s_h8 + 1) // 2, (s_w8 + 1) // 2

    # Project and reshape: z -> [batch, s_h16, s_w16, gf_dim*8].
    self.z_, self.h0_w, self.h0_b = linear(
        z, self.gf_dim * 8 * s_h16 * s_w16, 'g_h0_lin', with_w=True)
    self.h0 = tf.reshape(self.z_, [-1, s_h16, s_w16, self.gf_dim * 8])
    h0 = tf.nn.relu(self.g_bn0(self.h0))

    self.h1, self.h1_w, self.h1_b = conv2d_transpose(
        h0, [self.batch_size, s_h8, s_w8, self.gf_dim * 4],
        name='g_h1', with_w=True)
    h1 = tf.nn.relu(self.g_bn1(self.h1))

    h2, self.h2_w, self.h2_b = conv2d_transpose(
        h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2],
        name='g_h2', with_w=True)
    h2 = tf.nn.relu(self.g_bn2(h2))

    h3, self.h3_w, self.h3_b = conv2d_transpose(
        h2, [self.batch_size, s_h2, s_w2, self.gf_dim * 1],
        name='g_h3', with_w=True)
    h3 = tf.nn.relu(self.g_bn3(h3))

    h4, self.h4_w, self.h4_b = conv2d_transpose(
        h3, [self.batch_size, s_h, s_w, c_dim], name='g_h4', with_w=True)
    return tf.nn.tanh(h4)
def sampler(self, z, cdim, y=None, output_size = (64,64)):
    # Inference-time twin of generator(): it rebuilds the same layers under
    # the same variable names ('g_h0_lin', 'g_h1', ...) with reuse enabled
    # so it shares the trained weights, but runs every batch norm with
    # train=False. `y` is accepted but unused in the visible portion.
    # NOTE(review): this excerpt ends before the method's return statement.
    # NOTE(review): the hard-coded 4/8/16/32 sizes assume a 64x64 output
    # and must mirror any size changes made in generator(), or the shared
    # variables will have mismatched shapes.
    tf.get_variable_scope().reuse_variables()
    # Project z and reshape to the initial 4x4 spatial map.
    h0 = tf.reshape(linear(z, self.gf_dim*8*4*4, 'g_h0_lin'),
    [-1, 4, 4, self.gf_dim * 8])
    h0 = tf.nn.relu(self.g_bn0(h0, train=False))
    # Four stride-2 transposed convolutions: 4x4 -> 8x8 -> 16x16 -> 32x32
    # -> output_size.
    h1 = conv2d_transpose(h0, [self.batch_size, 8, 8, self.gf_dim*4], name='g_h1')
    h1 = tf.nn.relu(self.g_bn1(h1, train=False))
    h2 = conv2d_transpose(h1, [self.batch_size, 16, 16, self.gf_dim*2], name='g_h2')
    h2 = tf.nn.relu(self.g_bn2(h2, train=False))
    h3 = conv2d_transpose(h2, [self.batch_size, 32, 32, self.gf_dim*1], name='g_h3')
    h3 = tf.nn.relu(self.g_bn3(h3, train=False))
    h4 = conv2d_transpose(h3, [self.batch_size, output_size[0], output_size[1], cdim], name='g_h4')