    def train_c3d(self,
                  max_iteration=100000,
                  restore_tags=True,
                  trainable_whole=True,
                  trainable_mid=True):
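        """Train the C3D pipeline, stepping the whole network, only the
        mid-stage, or the optical-flow and gray branches, depending on the
        trainable_* flags."""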
        my_multi_train_datasets = multi_train_datasets(
            batch_size=self.batch_size,
            video_num=self.video_imgs_num,
            frame_interval=2,
            is_frame=True,
            is_Optical=True,
            crop_size=4,
            img_size=self.img_size_h)
        gpu_options = tf.GPUOptions(allow_growth=True)

        summaries_dir = self.summaries_dir + 'SINGLE_GPU%d.CPTK' % time.time()
        train_writer = tf.summary.FileWriter(summaries_dir)

        with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
            sess.run(tf.global_variables_initializer())
            if restore_tags:
                self.restore_model_weghts(sess)
            
            start_time = time.time()

            for idx in range(max_iteration):  
                try:
                    batch_data = my_multi_train_datasets.get_batches()
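                    # Step the optimizer that matches the requested training mode.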
                    if trainable_whole:
                        sess.run([self.c3d_apply],
                                 feed_dict={self.train_in_ph: batch_data,
                                            self.phase: True})
                    elif trainable_mid:
                        sess.run([self.mid_stage_apply],
                                 feed_dict={self.train_in_ph: batch_data,
                                            self.phase: True})
                    else:
                        sess.run([self.optical_apply, self.gray_apply],
                                 feed_dict={self.train_in_ph: batch_data,
                                            self.phase: True})

                except Exception as e:
                    print('training step failed: %s' % e)
                

                if (idx + 1) % 200 == 0:
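                    # Fetch losses on a fresh batch and log them to TensorBoard.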
                    try:
                        batch_data = my_multi_train_datasets.get_batches()
                        train_loss = self.fetch_net_loss(sess, batch_data)
                        print('Iteration %d' % (idx + 1))
                        t_elp = time.time() - start_time
                        h, m, s = time_hms(t_elp)
                        print('Time Elapsed : %d hours %d minutes %d seconds' %
                              (h, m, s))
                        t_eta = t_elp / (idx + 1) * (max_iteration - idx - 1)
                        h, m, s = time_hms(t_eta)
                        print('Time ETA : %d hours %d minutes %d seconds' %
                              (h, m, s))
                        print('loss %.8f, extraloss %.8f, gray_loss %.8f, '
                              'optical_loss %.8f, mid_stage_loss %.8f' %
                              (train_loss['total'], train_loss['extraloss'],
                               train_loss['gray'], train_loss['optical_flow'],
                               train_loss['midstage']))
                        train_writer.add_summary(train_loss['summary'], idx + 1)
                        train_writer.flush()
                    except Exception as e:
                        print('fetch_net_loss failed: %s' % e)
                    

                if (idx + 1) % 500 == 0 and (idx + 1) >= 2000:
                    self.save_model_weghts(sess)
        return
Example 2
    def train_c3d(
        self,
        max_iteration=100000,
        restore_tags=True,
        trainable_whole=True,
        trainable_mid=True,
    ):
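        """Variant of train_c3d with hard-coded dataset parameters and
        console-only progress logging (no summary writer)."""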
        my_multi_train_datasets = multi_train_datasets(batch_size=8,
                                                       video_num=4,
                                                       frame_interval=2,
                                                       is_frame=True,
                                                       is_Optical=True,
                                                       crop_size=4,
                                                       img_size=256)
        gpu_options = tf.GPUOptions(allow_growth=True)

        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:
            sess.run(tf.global_variables_initializer())
            if restore_tags:
                self.restore_model_weghts(sess)

            start_time = time.time()

            for idx in range(max_iteration):
                batch_data = my_multi_train_datasets.get_batches()
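                # Step the optimizer that matches the requested training mode.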
                if trainable_whole:
                    sess.run([self.c3d_apply],
                             feed_dict={
                                 self.train_in_ph: batch_data,
                                 self.phase: True
                             })
                elif trainable_mid:
                    sess.run([self.mid_stage_apply],
                             feed_dict={
                                 self.train_in_ph: batch_data,
                                 self.phase: True
                             })
                else:
                    sess.run([self.optical_apply, self.gray_apply],
                             feed_dict={
                                 self.train_in_ph: batch_data,
                                 self.phase: True
                             })

                if (idx + 1) % 100 == 0:
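                    # Report timing and losses every 100 iterations.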
                    batch_data = my_multi_train_datasets.get_batches()
                    train_loss = self.fetch_net_loss(sess, batch_data)
                    print('Iteration %d' % (idx + 1))
                    t_elp = time.time() - start_time
                    h, m, s = time_hms(t_elp)
                    print('Time Elapsed : %d hours %d minutes %d seconds' %
                          (h, m, s))
                    t_eta = t_elp / (idx + 1) * (max_iteration - idx - 1)
                    h, m, s = time_hms(t_eta)
                    print('Time ETA : %d hours %d minutes %d seconds' %
                          (h, m, s))
                    print(
                        'loss %.8f, gray_loss %.8f, optical_loss %.8f, mid_stage_loss %.8f'
                        % (train_loss['total'], train_loss['gray'],
                           train_loss['optical_flow'], train_loss['midstage']))

                if (idx + 1) % 500 == 0 and (idx + 1) >= 2000:
                    self.save_model_weghts(sess)
        return
Example 3
    def train(self, max_iteration, GPU_IN_USE):
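        """Train (PyTorch): split each clip into even- and odd-indexed frames
        and maximize the squared difference between the model's outputs on
        the two halves."""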

        my_multi_train_datasets = multi_train_datasets(
            batch_size=self.batch_size,
            video_num=self.video_imgs_num,
            frame_interval=2,
            is_frame=True,
            is_Optical=False,
            crop_size=4,
            img_size=self.img_size_h)
        summaries_dir = self.summaries_dir + 'SINGLE_GPU%d.CPTK' % time.time()
        writer = SummaryWriter(summaries_dir)

        for idx in range(max_iteration):
            self.optimizer.zero_grad()
            batch_data = my_multi_train_datasets.get_batches()
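            # Drop the trailing singleton channel axis:
            # (N, frames, H, W, 1) -> (N, frames, H, W).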
            batch_data = np.squeeze(batch_data, 4)
            if GPU_IN_USE:
                batch_data = Variable(
                    torch.tensor(batch_data, dtype=torch.float)).cuda()
            else:
                batch_data = Variable(torch.tensor(batch_data,
                                                   dtype=torch.float)).cpu()

            # Even-indexed frames (0, 2) form the trainable input; odd-indexed
            # frames (1, 3) form the fixed comparison input.
            batch_data1 = Variable(batch_data[:, 0:4:2, :, :],
                                   requires_grad=True)
            batch_data2 = Variable(batch_data[:, 1:4:2, :, :],
                                   requires_grad=False)

            output1 = self.model(batch_data1)
            output2 = self.model(batch_data2)
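            # Freeze output2 so the gradient flows only through output1;
            # minimizing the negative MSE maximizes their squared difference.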
            output2 = Variable(output2, requires_grad=False)
            loss = -1.0 * torch.mean((output1 - output2)**2)
            loss.backward()
            self.optimizer.step()
            if (idx + 1) % 200 == 0:
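                # Log the loss, sample inputs/outputs, and squared-difference images.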
                writer.add_scalar('loss', loss.item(), idx + 1)
                writer.add_image('input1_image', batch_data1[0, 0:1, :, :],
                                 idx + 1)
                writer.add_image('input2_image', batch_data1[0, 1:2, :, :],
                                 idx + 1)
                writer.add_image('input3_image', batch_data2[0, 0:1, :, :],
                                 idx + 1)
                writer.add_image('input4_image', batch_data2[0, 1:2, :, :],
                                 idx + 1)
                writer.add_image('out1_image', output1[0, :, :, :], idx + 1)
                writer.add_image('out2_image', output2[0, :, :, :], idx + 1)
                writer.add_image('input1_diff_image',
                                 (batch_data1[0, 0:1, :, :] -
                                  batch_data2[0, 0:1, :, :])**2, idx + 1)
                writer.add_image('input2_diff_image',
                                 (batch_data1[0, 1:2, :, :] -
                                  batch_data2[0, 1:2, :, :])**2, idx + 1)
                writer.add_image('out_difference', (output1[0, :, :, :] -
                                                    output2[0, :, :, :])**2,
                                 idx + 1)
                print('Iteration %d, loss %.8f' % (idx + 1, loss.item()))
        writer.close()
Example 4
    def train_c3d(self,
                  max_iteration=100000,
                  restore_tags=True,
                  mse_tag=True,
                  gan_tag=True,
                  g_iteration=5,
                  d_iteration=1):
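        """Adversarial variant: an optional MSE step on the gray branch,
        followed by g_iteration generator updates and d_iteration
        discriminator updates per batch."""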
        self.g_iteration = g_iteration
        self.d_iteration = d_iteration
        my_multi_train_datasets = multi_train_datasets(
            batch_size=self.batch_size,
            video_num=self.video_imgs_num,
            frame_interval=2,
            is_frame=True,
            is_Optical=False,
            crop_size=4,
            img_size=self.img_size_h)
        gpu_options = tf.GPUOptions(allow_growth=True)

        summaries_dir = self.summaries_dir + 'SINGLE_GPU%d.CPTK' % time.time()
        train_writer = tf.summary.FileWriter(summaries_dir)

        with tf.Session(config=tf.ConfigProto(
                gpu_options=gpu_options)) as sess:
            sess.run(tf.global_variables_initializer())
            if restore_tags:
                self.restore_model_weghts(sess)

            start_time = time.time()

            for idx in range(max_iteration):
                batch_data = my_multi_train_datasets.get_batches()
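                # Optional reconstruction (MSE) step on the gray branch.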
                if mse_tag:
                    sess.run([self.gray_mse_apply],
                             feed_dict={
                                 self.train_in_ph: batch_data,
                                 self.phase: True
                             })
                if gan_tag:
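                    # Alternate generator and discriminator updates at the configured ratio.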
                    for g_iter in range(self.g_iteration):
                        sess.run([self.gray_apply],
                                 feed_dict={
                                     self.train_in_ph: batch_data,
                                     self.phase: True
                                 })
                    for d_iter in range(self.d_iteration):
                        sess.run([self.discriminator_apply],
                                 feed_dict={
                                     self.train_in_ph: batch_data,
                                     self.phase: True
                                 })

                if (idx + 1) % 100 == 0:
                    batch_data = my_multi_train_datasets.get_batches()
                    train_loss = self.fetch_net_loss(sess, batch_data)
                    print('Iteration %d' % (idx + 1))
                    t_elp = time.time() - start_time
                    h, m, s = time_hms(t_elp)
                    print('Time Elapsed : %d hours %d minutes %d seconds' %
                          (h, m, s))
                    t_eta = t_elp / (idx + 1) * (max_iteration - idx - 1)
                    h, m, s = time_hms(t_eta)
                    print('Time ETA : %d hours %d minutes %d seconds' %
                          (h, m, s))
                    print(
                        'generator %.8f, discriminator %.8f ' %
                        (train_loss['generator'], train_loss['discriminator']))
                    print('gray_mse_loss', train_loss['gray_mse_loss'])
                    print('disc_sequence_loss',
                          train_loss['disc_sequence_loss'])
                    train_writer.add_summary(train_loss['summary'], (idx + 1))
                    train_writer.flush()

                if (idx + 1) % 500 == 0 and (idx + 1) >= 2000:
                    self.save_model_weghts(sess)
        return