Example #1
def _train():
    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    # all_filenames = 
    prepare_dirs(delete_train_dir=True, shuffle_filename=False)
    filenames_input = get_filenames(dir_file=FLAGS.dataset_input, shuffle_filename=False)
    # if not specified, use the same directory as the input
    if FLAGS.dataset_output == '':
        FLAGS.dataset_output = FLAGS.dataset_input
    filenames_output = get_filenames(dir_file=FLAGS.dataset_output, shuffle_filename=False)

    # Separate training and test sets
    # train_filenames = all_filenames[:-FLAGS.test_vectors]
    # test_filenames  = all_filenames[-FLAGS.test_vectors:]
    train_filenames_input = filenames_input[:-FLAGS.test_vectors]
    test_filenames_input  = filenames_input[-FLAGS.test_vectors:]
    train_filenames_output = filenames_output[:-FLAGS.test_vectors]
    test_filenames_output  = filenames_output[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs_one_sources(sess, train_filenames_input, train_filenames_output, 
                                                                        image_size=FLAGS.sample_size, axis_undersample=FLAGS.axis_undersample)
    test_features,  test_labels  = srez_input.setup_inputs_one_sources(sess, test_filenames_input, test_filenames_output,
                                                                        image_size=FLAGS.sample_size, axis_undersample=FLAGS.axis_undersample)
    
    # sample size
    num_sample_train = len(train_filenames_input)
    num_sample_test = len(test_filenames_input)
    print('train on {0} samples and test on {1} samples'.format(num_sample_train, num_sample_test))

    # Add some noise during training (think denoising autoencoders)
    noise_level = .00
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list,
     gene_layers, gene_mlayers] = \
            srez_model.create_model(sess, noisy_train_features, train_labels)

    gene_loss = srez_model.create_generator_loss(disc_fake_output, gene_output, train_features, train_labels)
    disc_real_loss, disc_fake_loss = \
                     srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')
    
    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_model.create_optimizers(gene_loss, gene_var_list,
                                         disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(train_data, num_sample_train, num_sample_test)
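Note: Every example below ends with `train_data = TrainData(locals())`. A minimal sketch of what such a container can look like, assuming the srez-style dict-to-attribute wrapper (the actual class definition is not shown in these snippets and may differ):

class TrainData(object):
    def __init__(self, dictionary):
        # Expose every captured local (sess, gene_loss, learning_rate, ...)
        # as an attribute, e.g. train_data.gene_loss.
        self.__dict__.update(dictionary)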
Example #2
def _train():
    # Setup global tensorflow state
    sess = setup_tensorflow()

    # Prepare directories
    # all_filenames = prepare_dirs(delete_train_dir=True)
    all_filenames = prepare_dirs(delete_train_dir=False)

    # Separate training and test sets
    train_filenames = all_filenames[:-FLAGS.test_vectors]
    test_filenames = all_filenames[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(
        sess, train_filenames)
    test_features, test_labels = srez_input.setup_inputs(sess, test_filenames)

    # Add some noise during training (think denoising autoencoders)
    noise_level = .03
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list,
     dropout] = \
        srez_model.create_model(sess, noisy_train_features, train_labels)

    gene_loss = srez_model.create_generator_loss(disc_fake_output, gene_output,
                                                 train_features)
    disc_real_loss, disc_fake_loss = \
        srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
        srez_model.create_optimizers(gene_loss, gene_var_list,
                                     disc_loss, disc_var_list)

    # Restore variables from checkpoint, if one exists
    # if tf.gfile.IsDirectory(FLAGS.checkpoint_dir):
    #     filename = 'checkpoint_new.txt'
    #     filename = os.path.join(FLAGS.checkpoint_dir, filename)
    #     saver = tf.train.Saver()
    #     if tf.gfile.Exists(filename):
    #         saver.restore(tf.Session(), filename)
    #         print("Restored previous checkpoint. "
    #               "Warning, Batch number restarted.")

    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(train_data)
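Note: The commented-out block above sketches restoring variables before training; as written it would restore into a fresh `tf.Session()` rather than the existing `sess`. A hedged sketch of a working TF1 restore helper (the use of `tf.train.latest_checkpoint` is an assumption, not part of the original snippet):

import tensorflow as tf

def maybe_restore(sess, checkpoint_dir):
    # Restore all saved variables from the newest checkpoint, if one exists.
    ckpt = tf.train.latest_checkpoint(checkpoint_dir)
    if ckpt is not None:
        tf.train.Saver().restore(sess, ckpt)
        print("Restored previous checkpoint. Warning: batch number restarted.")
    else:
        print("No checkpoint in %s, training from scratch" % checkpoint_dir)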
Example #3
def _train():
    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=True)

    # Separate training and test sets
    rn.shuffle(all_filenames)
    train_filenames = all_filenames[:-FLAGS.test_vectors]
    test_filenames = all_filenames[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(sess,
                                                           train_filenames,
                                                           image_size=32,
                                                           crop_size=128)
    test_features, test_labels = srez_input.setup_inputs(sess,
                                                         test_filenames,
                                                         image_size=32,
                                                         crop_size=128)

    # Add some noise during training (think denoising autoencoders)
    noise_level = .03
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
            srez_model.create_model(sess, noisy_train_features, train_labels)

    gene_loss = srez_model.create_generator_loss(disc_fake_output, gene_output,
                                                 train_features)
    disc_real_loss, disc_fake_loss = \
                     srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_model.create_optimizers(gene_loss, gene_var_list,
                                         disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(train_data)
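Note: Example #3 shuffles before slicing off the last `FLAGS.test_vectors` files for testing; `rn` is presumably the standard-library `random` module imported as `rn` (an assumption, the import is not shown). A self-contained sketch of the same shuffle-then-slice split:

import random as rn  # assumed alias for the `rn` used above

def split_filenames(all_filenames, test_vectors):
    # Shuffle a copy, then hold out the last `test_vectors` entries for testing.
    filenames = list(all_filenames)
    rn.shuffle(filenames)
    return filenames[:-test_vectors], filenames[-test_vectors:]

train, test = split_filenames(['f%02d.jpg' % i for i in range(10)], 3)
assert len(train) == 7 and len(test) == 3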
Example #4
def _train():
    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=True)

    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    #saver = tf.train.Saver()
    #filename = 'checkpoint'
    #filename = os.path.join(FLAGS.checkpoint_dir, filename)
    #saver.restore(sess,tf.train.latest_checkpoint("./checkpoint/"))
    #print("Model restored from file: %s" % FLAGS.checkpoint_dir)

    # Separate training and test sets
    train_filenames = all_filenames[:-FLAGS.test_vectors]
    test_filenames = all_filenames[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(
        sess, train_filenames)
    test_features, test_labels = srez_input.setup_inputs(sess, test_filenames)

    # Add some noise during training (think denoising autoencoders)
    noise_level = .03
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
            srez_model.create_model(sess, noisy_train_features, train_labels)

    gene_loss = srez_model.create_generator_loss(disc_fake_output, gene_output,
                                                 train_features)
    disc_real_loss, disc_fake_loss = \
                     srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_model.create_optimizers(gene_loss, gene_var_list,
                                         disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(train_data)
Example #5
def _train():
    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=False)

    # Separate training and test sets

    if FLAGS.specific_test:
        train_filenames = all_filenames[:]
        test_filenames = prepare_test_dirs()[:]
    else:
        train_filenames = all_filenames[:-FLAGS.test_vectors]
        test_filenames = all_filenames[-FLAGS.test_vectors:]

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(
        sess, train_filenames)
    test_features, test_labels = srez_input.setup_inputs(sess, test_filenames)

    # Add some noise during training (think denoising autoencoders)
    noise_level = .03
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
            srez_model.create_model(sess, noisy_train_features, train_labels)

    gene_loss, gene_l1_loss, gene_ce_loss = srez_model.create_generator_loss(
        disc_fake_output, gene_output, train_features, train_labels)
    disc_real_loss, disc_fake_loss = \
                     srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(2 * FLAGS.disc_real_factor * disc_real_loss,
                       2 * (1 - FLAGS.disc_real_factor) * disc_fake_loss,
                       name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_model.create_optimizers(gene_loss, gene_var_list,
                                         disc_loss, disc_var_list)
    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(train_data)
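Note: A quick numeric check of the weighted discriminator loss used just above: the factor of 2 keeps the total on the same scale as the plain `disc_real_loss + disc_fake_loss` sum when `FLAGS.disc_real_factor` is 0.5 (the loss values below are made up purely for illustration):

disc_real_factor = 0.5
disc_real_loss, disc_fake_loss = 0.7, 0.4   # made-up values
disc_loss = (2 * disc_real_factor * disc_real_loss
             + 2 * (1 - disc_real_factor) * disc_fake_loss)
assert abs(disc_loss - (disc_real_loss + disc_fake_loss)) < 1e-9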
Example #6
def _train():
    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=True)

    # Separate training and test sets
    train_filenames = all_filenames[:-FLAGS.test_vectors]
    test_filenames  = all_filenames[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(sess, train_filenames)
    test_features,  test_labels  = srez_input.setup_inputs(sess, test_filenames)

    # Add some noise during training (think denoising autoencoders)
    noise_level = .03
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
            srez_model.create_model(sess, noisy_train_features, train_labels)

    gene_loss = srez_model.create_generator_loss(disc_fake_output, gene_output, train_features)
    disc_real_loss, disc_fake_loss = \
                     srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')
    
    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_model.create_optimizers(gene_loss, gene_var_list,
                                         disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(train_data)
Example #7
def _train():
    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=True)

    # Separate training and test sets
    # train_filenames = all_filenames[:-FLAGS.test_vectors]
    # test_filenames  = all_filenames[-FLAGS.test_vectors:]


    # We chose a pre-determined set of faces for the convenience of comparing results across models
    determined_test = [73883-1, 110251-1, 36510-1, 132301-1, 57264-1, 152931-1, 93861-1,
                       124938-1, 79512-1, 106152-1, 127384-1, 134028-1, 67874-1,
                       10613-1, 198694-1, 100990-1]
    all_filenames = np.array(all_filenames)
    train_filenames = list(np.delete(all_filenames, determined_test))

#     test_filenames = list(all_filenames[determined_test])

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(sess, train_filenames)
    
    # test_features,  test_labels  = srez_input.setup_inputs(sess, test_filenames)
    
    # Test-set labels are stored in 'testset_label.npy'
    test_labels = np.load('testset_label.npy')
    test_labels = tf.convert_to_tensor(test_labels, dtype=tf.float32)

    if FLAGS.input == 'scaled':
        test_features = tf.image.resize_area(test_labels, [16, 16])
    elif FLAGS.input == 'noise':
        test_features = tf.random_uniform(
            shape=[16, FLAGS.noise_dimension, FLAGS.noise_dimension, 3],
            minval=-1., maxval=1.)

    # Add some noise during training (think denoising autoencoders)
    noise_level = FLAGS.train_noise
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    [gene_minput, gene_moutput, gene_output, gene_var_list,
     disc_real_output, disc_fake_output, gradients, disc_var_list] = \
            srez_model.create_model(sess, noisy_train_features, train_labels)
  
    # Add summary scalars for the test set
    max_samples = 10  # output 10 test images
    gene_output_clipped = tf.maximum(tf.minimum(gene_moutput, 1.0), 0.)

    # Calculate the L1 error between output samples and labels as an objective measure of image quality
    if FLAGS.input != 'noise':
        l1_quality = tf.reduce_sum(tf.abs(gene_output_clipped - test_labels), [1, 2, 3])
        l1_quality = tf.reduce_mean(l1_quality[:max_samples])
        mse_quality = tf.reduce_sum(tf.square(gene_output_clipped - test_labels), [1, 2, 3])
        mse_quality = tf.reduce_mean(mse_quality[:max_samples])
        tf.summary.scalar('l1_quality', l1_quality, collections=['test_scalars'])
        tf.summary.scalar('mse_quality', mse_quality, collections=['test_scalars'])


    gene_loss = srez_model.create_generator_loss(disc_fake_output, gene_output, train_features)
    disc_real_loss, disc_fake_loss = \
                     srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)

    # Different training objectives
    if FLAGS.loss_func == 'dcgan':
        # for DCGAN
        disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')
    elif FLAGS.loss_func == 'wgan':
        # for WGAN
        disc_loss = tf.subtract(disc_real_loss, disc_fake_loss, name='disc_loss')
    elif FLAGS.loss_func == 'wgangp':
        # for WGANGP
        disc_loss = tf.subtract(disc_real_loss, disc_fake_loss)
        slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
        gradient_penalty = tf.reduce_mean((slopes-1.)**2)
        disc_loss = tf.add(disc_loss, FLAGS.LAMBDA*gradient_penalty, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize, d_clip) = \
            srez_model.create_optimizers(gene_loss, gene_var_list, disc_loss, disc_var_list)

    # For tensorboard
    tf.summary.scalar('generator_loss', gene_loss)
    tf.summary.scalar('discriminator_real_loss', disc_real_loss)
    tf.summary.scalar('discriminator_fake_loss', disc_fake_loss)
    tf.summary.scalar('discriminator_tot_loss', disc_loss)


    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(train_data)
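Note: Example #7's 'wgangp' branch builds the WGAN-GP penalty from a `gradients` tensor returned by `create_model`. A hedged sketch of how such gradients are commonly produced via the interpolation trick from the WGAN-GP paper; `discriminator_fn` is a placeholder rather than the actual srez_model API, and the norm here is taken over all image axes rather than the single axis used above:

import tensorflow as tf

def gradient_penalty(discriminator_fn, real_images, fake_images, lam=10.0):
    # Random interpolation between real and generated samples.
    batch = tf.shape(real_images)[0]
    eps = tf.random_uniform(tf.stack([batch, 1, 1, 1]), minval=0., maxval=1.)
    interpolates = real_images + eps * (fake_images - real_images)
    # Gradient of the critic output w.r.t. the interpolated inputs.
    grads = tf.gradients(discriminator_fn(interpolates), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    # Penalize deviation of the gradient norm from 1.
    return lam * tf.reduce_mean((slopes - 1.) ** 2)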
Example #8
def _train():

    # LoadAndRunOnData=False
    LoadAndRunOnData = myParams.myDict['LoadAndRunOnData'] > 0
    if LoadAndRunOnData:
        # Setup global tensorflow state
        sess, summary_writer = setup_tensorflow()

        # Prepare directories
        # filenames = prepare_dirs(delete_train_dir=False)

        # Setup async input queues
        # features, labels = srez_input.setup_inputs(sess, filenames)
        features, labels = srez_input.setup_inputs(sess, 1)

        # Create and initialize model
        [gene_minput, gene_moutput,
         gene_output, gene_var_list,
         disc_real_output, disc_fake_output, disc_var_list] = \
                srez_modelBase.create_model(sess, features, labels)

        # Restore variables from checkpoint
        print("Adding to saver:")
        var_listX = gene_var_list
        var_listX = [v for v in var_listX if "Bank" not in v.name]
        for line in var_listX:
            print("Adding " + line.name + '           ' +
                  str(line.shape.as_list()))
        print("Saver var list end")

        saver = tf.train.Saver(var_listX)
        # saver = tf.train.Saver()
        filename = 'checkpoint_new'
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS2_dataNeighborhoodRCB0__2018-06-08_16-17-56_checkpoint', filename)
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS2_dataNeighborhoodRCB0__2018-06-09_19-44-17_checkpoint', filename)
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS__2018-06-29_10-39-13_checkpoint', filename)
        checkpointP = myParams.myDict['LoadAndRunOnData_checkpointP']
        filename = os.path.join(checkpointP, filename)

        saver.restore(sess, filename)

        if myParams.myDict['Mode'] in ('RegridTry1', 'RegridTry1C', 'RegridTry1C2',
                                       'RegridTry1C2_TS', 'RegridTry1C2_TS2'):
            FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
            NMapCR = FullData['NMapCR']

        for r in range(1, myParams.myDict['HowManyToRun']):
            # ifilename='/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RealData/b_Ben14May_Sli5_r' +  f'{r:02}' + '.mat'
            # ifilename='/media/a/DATA/14May18/Ben/meas_MID109_gBP_VD11_U19_4min_FID17944/RealData/Sli11_r' +  f'{r:02}' + '.mat'
            if myParams.myDict['InputMode'] == 'Cart3SB':
                # print('Loaded SensMaps, Shape %d %d %d %d' % (SensMapsSz[0],SensMapsSz[1],SensMapsSz[2],SensMapsSz[3]))
                print('Cart3SB running on real data %d' % r)
                # feature  shape: (1048576, 1, 1) <dtype: 'float32'>
                batch_size = myParams.myDict['batch_size']
                # RealData=np.zeros((batch_size,1048576, 1, 1),np.float32)
                # RealData=np.zeros((batch_size,640000, 1, 1),np.float32)

                # Simulating RealData from ITS, Sens
                MB = GT.getparam('MB')
                TimePoints_ms = GT.getparam('TimePoints_ms')
                nTSC = TimePoints_ms.shape[0]
                nCh = GT.getparam('nccToUse')

                LabelsH = myParams.myDict['LabelsH']
                LabelsW = myParams.myDict['LabelsW']

                H = LabelsH
                W = LabelsW

                SnsFN = '/opt/data/CCSensMaps.mat'
                fS = h5py.File(SnsFN, 'r')
                SensMaps = fS['SensCC']
                SensMaps = SensMaps['real'] + 1j * SensMaps['imag']
                SensMapsSz = SensMaps.shape
                print('r Loaded SensMaps, Shape %d %d %d %d' %
                      (SensMapsSz[0], SensMapsSz[1], SensMapsSz[2],
                       SensMapsSz[3]))
                SensMaps = SensMaps[:, :, :, :nCh]

                NumSensMapsInFile = SensMaps.shape[0]
                # IdxS=15
                for b in range(0, MB):
                    # if b==1:
                    #     IdxB2=tf.random_uniform([1],minval=12,maxval=19,dtype=tf.int32)
                    #     IdxS=IdxS+IdxB2[0]
                    #     IdxS=tf.cond(IdxS[0]>=NumSensMapsInFile, lambda: IdxS-NumSensMapsInFile, lambda: IdxS)

                    # Sens=np.squeeze(SensMaps[IdxS,:,:,:],axis=0)
                    Sens = (SensMaps[15, :, :, :])

                    Sens = Sens[:H, :W, :nCh]

                    # Sens = tf.image.random_flip_left_right(Sens)
                    # Sens = tf.image.random_flip_up_down(Sens)
                    # uS=tf.random_uniform([1])
                    # Sens=tf.cond(uS[0]<0.5, lambda: tf.identity(Sens), lambda: tf.image.rot90(Sens))
                    SensMsk = GT.NP_addDim(
                        np.sum(np.abs(Sens), axis=2) > 0).astype(np.complex64)
                    Sens = GT.NP_addDim(Sens)

                    if b == 0:
                        SensMB = Sens
                        SensMskMB = SensMsk

                    # else:
                    #     SensMB=tf.concat([SensMB,Sens],axis=3) #     SensMB H W nCh MB
                    #     SensMskMB=tf.concat([SensMskMB,SensMsk],axis=2) #     SensMskMB H W MB

                # nToLoad=myParams.myDict['nToLoad']
                # LoadAndRunOnData=myParams.myDict['LoadAndRunOnData']>0
                # if LoadAndRunOnData:
                nToLoad = 300

                print('r loading images ' + time.strftime("%Y-%m-%d %H:%M:%S"))
                GREBaseP = '/opt/data/'
                SFN = GREBaseP + 'All_Orientation-0x.mat'
                f = h5py.File(SFN, 'r')
                I = f['CurSetAll'][0:nToLoad]
                print('r Loaded images ' + time.strftime("%Y-%m-%d %H:%M:%S"))

                SendTSCest = GT.getparam('SendTSCest') > 0
                HamPow = GT.getparam('HamPow')

                # def TFexpix(X): return tf.exp(tf.complex(tf.zeros_like(X),X))
                def NPexpix(X):
                    return np.exp(1j * X)

                for b in range(0, MB):
                    # TFI = tf.constant(np.int16(I))
                    # Idx=tf.random_uniform([1],minval=0,maxval=I.shape[0],dtype=tf.int32)
                    Idx = 133

                    Data4 = (I[Idx, :, :, :])
                    # Data4=tf.squeeze(tf.slice(I,[Idx[0],0,0,0],[1,-1,-1,-1]),axis=0)
                    # Data4 = tf.image.random_flip_left_right(Data4)
                    # Data4 = tf.image.random_flip_up_down(Data4)

                    # u1=tf.random_uniform([1])
                    # Data4=tf.cond(u1[0]<0.5, lambda: tf.identity(Data4), lambda: tf.image.rot90(Data4))

                    # Data4 = tf.random_crop(Data4, [H, W, 4])
                    # Data4 = tf.random_crop(Data4, [:H, :W, 4])
                    Data4 = Data4[:H, :W, :]

                    # M=tf.slice(Data4,[0,0,0],[-1,-1,1])
                    # Ph=tf.slice(Data4,[0,0,1],[-1,-1,1])
                    # feature=tf.cast(M,tf.complex64)*TFexpix(Ph)
                    M = Data4[:, :, 0]
                    Ph = Data4[:, :, 1]
                    feature = M.astype(np.complex64) * NPexpix(Ph)

                    feature = GT.NP_addDim(feature) * SensMskMB[:, :, b:b + 1]

                    T2S_ms = Data4[:, :, 2]
                    # T2S_ms = tf.where( T2S_ms<1.5, 10000 * tf.ones_like( T2S_ms ), T2S_ms )
                    T2S_ms[T2S_ms < 1.5] = 10000

                    B0_Hz = Data4[:, :, 3]
                    # B0_Hz=M*0

                    # T2S_ms = tf.where( tf.is_nan(T2S_ms), 10000 * tf.ones_like( T2S_ms ), T2S_ms )
                    T2S_ms[np.isnan(T2S_ms)] = 10000
                    # B0_Hz = tf.where( tf.is_nan(B0_Hz), tf.zeros_like( B0_Hz ), B0_Hz )
                    B0_Hz[np.isnan(B0_Hz)] = 0

                    if SendTSCest:
                        # HamPowA=10
                        HamPowA = HamPow
                        HamA = np.roll(np.hamming(H), np.int32(H / 2))
                        HamA = np.power(HamA, HamPowA)
                        HamXA = np.reshape(HamA, (1, H, 1))
                        HamYA = np.reshape(HamA, (1, 1, W))

                        B0_Hz_Smoothed = np.transpose(
                            GT.NP_addDim(B0_Hz.astype(np.complex64)),
                            (2, 0, 1))
                        B0_Hz_Smoothed = np.fft.fft2(B0_Hz_Smoothed)
                        B0_Hz_Smoothed = B0_Hz_Smoothed * HamXA
                        B0_Hz_Smoothed = B0_Hz_Smoothed * HamYA
                        B0_Hz_Smoothed = np.fft.ifft2(B0_Hz_Smoothed)
                        B0_Hz_Smoothed = np.transpose(B0_Hz_Smoothed,
                                                      (1, 2, 0))
                        B0_Hz_Smoothed = np.real(B0_Hz_Smoothed)

                        TSCest = np.exp(1j * 2 * np.pi *
                                        (B0_Hz_Smoothed * TimePoints_ms /
                                         1000).astype(np.complex64))
                        # TSCest=np.ones(TSCest.shape).astype(np.complex64)
                        print('TSCest shape: ' + str(TSCest.shape))
                        # TSCest=TSCest*0+1
                        # print('TSCest shape: ' + str(TSCest.shape))
                        # print('reducing B0')
                        # print('B0_Hz shape: ' + str(B0_Hz.shape))
                        # print('B0_Hz_Smoothed shape: ' + str(B0_Hz_Smoothed.shape))
                        # B0_Hz=B0_Hz-np.squeeze(B0_Hz_Smoothed)
                        # print('B0_Hz shape: ' + str(B0_Hz.shape))

                    # urand_ms=tf.random_uniform([1])*12
                    # urand_sec=(tf.random_uniform([1])*2-1)*3/1000

                    # feature=feature*tf.cast(tf.exp(-urand_ms/T2S_ms),tf.complex64)
                    # feature=feature*TFexpix(2*np.pi*B0_Hz*urand_sec)

                    mx = M.max()
                    mx = np.maximum(mx, 1)
                    mx = mx.astype(np.complex64)

                    feature = feature / mx

                    CurIWithPhase = feature

                    TSCM = np.exp(-TimePoints_ms / GT.NP_addDim(T2S_ms))
                    TSCP = np.exp(1j * 2 * np.pi *
                                  (GT.NP_addDim(B0_Hz) * TimePoints_ms /
                                   1000).astype(np.complex64))
                    TSC = TSCM.astype(np.complex64) * TSCP

                    ITSbase = CurIWithPhase * TSC  # ITSbase is H,W,nTSC

                    TSC = GT.NP_addDim(TSC)
                    ITSbase = GT.NP_addDim(ITSbase)
                    if b == 0:
                        CurIWithPhaseMB = CurIWithPhase
                        TSCMB = TSC
                        ITSbaseMB = ITSbase
                        if SendTSCest:
                            TSCest = GT.NP_addDim(TSCest)
                            TSCMBest = TSCest
                    # else:
                    #     CurIWithPhaseMB=tf.concat([CurIWithPhaseMB,CurIWithPhase],axis=2) #     CurIWithPhaseMB H W MB
                    #     TSCMB=tf.concat([TSCMB,TSC],axis=3) #     TSCMB H W nTSC MB
                    #     ITSbaseMB=tf.concat([ITSbaseMB,ITSbase],axis=3) #     ITSbaseMB H W nTSC MB
                    #     if SendTSCest:
                    #         TSCMBest=tf.stack([TSCMBest,TSCest],axis=3)
                print('r ok 2')
                ITS_P = np.transpose(
                    GT.NP_addDim(ITSbaseMB),
                    (4, 0, 1, 2, 3))  # /batch_size/,H,W,nTSC,MB

                Msk3 = np.zeros((H, W, nTSC, 1, 1, 1))

                PEShifts = GT.getparam('PEShifts')
                PEJump = GT.getparam('PEJump')
                print('r Using PEShifts')
                for i in range(nTSC):
                    Msk3[PEShifts[i]::PEJump, :, i, :, :, :] = 1

                Msk3 = np.complex64(Msk3)

                # GT.setparam('CartMask',Msk3)

                Sens6 = SensMB[:, :, np.newaxis, :, :,
                               np.newaxis]  # H,W,/nTS/,nCh,MB,/batch_size/

                # AHA_ITS=GT.Cartesian_OPHOP_ITS_MB(ITS_P,Sens6,Msk3)

                ITS = np.transpose(ITSbaseMB, (0, 3, 2, 1))  # H, nTSC, W
                ITS = np.reshape(ITS, (H, W * nTSC * MB, 1))
                ITS_RI = GT.NP_ConcatRIOn2(ITS)

                Sensc = SensMB
                Sens1D = GT.NP_ConcatRIOn0(np.reshape(Sensc, (-1, 1, 1)))
                feature = Sens1D

                AHA_ITS = GT.NP_Cartesian_OPHOP_ITS_MB(ITS_P, Sens6, Msk3)
                # new simpler approach
                if SendTSCest:
                    TSCMBest_P = np.transpose(
                        GT.NP_addDim(TSCMBest),
                        (4, 0, 1, 2, 3))  # /batch_size/,H,W,nTSC,MB
                    AHA_ITS = AHA_ITS * np.conj(TSCMBest_P)

                #         send AHA_ITS
                AHA_ITS_1D = GT.NP_ConcatRIOn0(np.reshape(AHA_ITS, (-1, 1, 1)))
                feature = np.concatenate((feature, AHA_ITS_1D), axis=0)

                if SendTSCest:
                    TSCest1D = GT.NP_ConcatRIOn0(
                        np.reshape(TSCMBest_P, (-1, 1, 1)))
                    feature = np.concatenate((feature, TSCest1D), axis=0)

                RealData = np.tile(feature, (batch_size, 1, 1, 1))

                # End simulating RealData
                Real_feature = RealData
            else:
                ifilenamePrefix = myParams.myDict['LoadAndRunOnData_Prefix']
                #             ifilename=ifilenamePrefix +  f'{r:02}' + '.mat'
                ifilename = ifilenamePrefix + '%02d.mat' % (r)
                RealData = scipy.io.loadmat(ifilename)
                RealData = RealData['Data']

                if RealData.ndim == 2:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1], 1, 1))
                if RealData.ndim == 3:
                    RealData = RealData.reshape(
                        (RealData.shape[0], RealData.shape[1],
                         RealData.shape[2], 1))

                Real_feature = RealData

                # if myParams.myDict['Mode'] == 'RegridTry1' or myParams.myDict['Mode'] == 'RegridTry1C' or myParams.myDict['Mode'] == 'RegridTry1C2' or myParams.myDict['Mode'] == 'RegridTry1C2_TS' or myParams.myDict['Mode'] == 'RegridTry1C2_TS2':
                #     batch_size=myParams.myDict['batch_size']

                #     Real_feature=np.reshape(RealData[0],[RealData.shape[1]])
                #     Real_feature=np.take(Real_feature,NMapCR)
                #     Real_feature=np.tile(Real_feature, (batch_size,1,1,1))

            if myParams.myDict['InputMode'] in ('RegridTry1', 'RegridTry2'):
                # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
                FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
                NMapCR = FullData['NMapCR']

                batch_size = myParams.myDict['batch_size']

                Real_feature = np.reshape(RealData[0], [RealData.shape[1]])
                Real_feature = np.take(Real_feature, NMapCR)
                Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

            Real_dictOut = {gene_minput: Real_feature}

            gene_RealOutput = sess.run(gene_moutput, feed_dict=Real_dictOut)

            OnRealData = {}
            OnRealDataM = gene_RealOutput
            #             filenamex = 'OnRealData' + f'{r:02}' + '.mat'
            filenamexBase = 'OnRealData' + '%02d' % (r)
            filenamex = filenamexBase + '.mat'

            LoadAndRunOnData_OutP = myParams.myDict['LoadAndRunOnData_OutP']
            filename = os.path.join(LoadAndRunOnData_OutP, filenamex)
            OnRealData['x'] = OnRealDataM
            scipy.io.savemat(filename, OnRealData)

            image = np.sqrt(
                np.square(OnRealDataM[0, -H:, :(W * 3), 0]) +
                np.square(OnRealDataM[0, -H:, :(W * 3), 1]))
            filenamep = filenamexBase + '.png'
            filename = os.path.join(LoadAndRunOnData_OutP, filenamep)
            imageio.imwrite(filename, image)

        print('Saved recon of real data')
        exit()

    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=True)

    # Separate training and test sets
    #train_filenames = all_filenames[:-FLAGS.test_vectors]
    train_filenames = all_filenames
    #test_filenames  = all_filenames[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    #pdb.set_trace()

    # ggg Signal Bank stuff:
    if myParams.myDict['BankSize'] > 0:
        if myParams.myDict['InputMode'] == 'RegridTry3FMB':
            BankSize = myParams.myDict['BankSize'] * 2

            # BankInit=np.zeros([BankSize,myParams.myDict['DataH'],1,1])
            # LBankInit=np.zeros([BankSize,myParams.myDict['LabelsH'],myParams.myDict['LabelsW'], 2])
            with tf.variable_scope("aaa"):
                Bank = tf.get_variable(
                    "Bank",
                    shape=[BankSize, myParams.myDict['DataH'], 1, 1],
                    dtype=tf.float32,
                    trainable=False)
                LBank = tf.get_variable("LBank",
                                        shape=[
                                            BankSize,
                                            myParams.myDict['LabelsH'],
                                            myParams.myDict['LabelsW'], 2
                                        ],
                                        dtype=tf.float32,
                                        trainable=False)
                # LBank=tf.get_variable("LBank",initializer=tf.cast(LBankInit, tf.float32),dtype=tf.float32,trainable=False)
        else:
            BankSize = myParams.myDict['BankSize']

            BankInit = np.zeros([BankSize, myParams.myDict['DataH'], 1, 1])
            LBankInit = np.zeros([
                BankSize, myParams.myDict['LabelsH'],
                myParams.myDict['LabelsW'], 2
            ])
            with tf.variable_scope("aaa"):
                # Bank=tf.get_variable("Bank",initializer=tf.cast(BankInit, tf.float32),dtype=tf.float32)
                Bank = tf.get_variable(
                    "Bank",
                    shape=[BankSize, myParams.myDict['DataH'], 1, 1],
                    dtype=tf.float32,
                    trainable=False)
                LBank = tf.get_variable("LBank",
                                        shape=[
                                            BankSize,
                                            myParams.myDict['LabelsH'],
                                            myParams.myDict['LabelsW'], 2
                                        ],
                                        dtype=tf.float32,
                                        trainable=False)
                # LBank=tf.get_variable("LBank",initializer=tf.cast(LBankInit, tf.float32),dtype=tf.float32)

        init_new_vars_op = tf.variables_initializer([Bank, LBank])
        sess.run(init_new_vars_op)
    # ggg end Signal Bank stuff:

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(
        sess, train_filenames)
    # test_features, test_labels = srez_input.setup_inputs(sess, train_filenames,TestStuff=True)
    test_features = train_features
    test_labels = train_labels
    #test_features,  test_labels  = srez_input.setup_inputs(sess, test_filenames)

    print('starting ' + time.strftime("%Y-%m-%d %H:%M:%S"))
    print('train_features %s' % (train_features))
    print('train_labels %s' % (train_labels))

    # Add some noise during training (think denoising autoencoders)
    noise_level = myParams.myDict['noise_level']
    AddNoise = noise_level > 0.0
    if AddNoise:
        noisy_train_features = train_features + tf.random_normal(
            train_features.get_shape(), stddev=noise_level)
    else:
        noisy_train_features = train_features

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
            srez_modelBase.create_model(sess, noisy_train_features, train_labels)

    # gene_VarNamesL=[];
    # for line in gene_var_list: gene_VarNamesL.append(line.name+'           ' + str(line.shape.as_list()))
    # gene_VarNamesL.sort()

    # for line in gene_VarNamesL: print(line)
    # # var_23 = [v for v in tf.global_variables() if v.name == "gene/GEN_L020/C2D_weight:0"][0]

    # for line in sess.graph.get_operations(): print(line)
    # Gen3_ops=[]
    # for line in sess.graph.get_operations():
    #     if 'GEN_L003' in line.name:
    #         Gen3_ops.append(line)

    #     # LL=QQQ.outputs[0]

    # for x in Gen3_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    # GenC2D_ops= [v for v in sess.graph.get_operations()]

    # GenC2D_ops= [v for v in tf.get_operations() if "weight" in v.name]
    # GenC2D_ops= [v for v in GenC2D_ops if "C2D" in v.name]
    # for x in GenC2D_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    # for x in GenC2D_ops: print(x.name)

    AEops = [
        v for v in sess.graph.get_operations()
        if "AE" in v.name and not ("_1/" in v.name)
    ]
    # AEops = [v for v in td.sess.graph.get_operations() if "Pixel" in v.name and not ("_1/" in v.name) and not ("opti" in v.name) and not ("Assign" in v.name) and not ("read" in v.name) and not ("Adam" in v.name)]
    AEouts = [v.outputs[0] for v in AEops]
    varsForL1 = AEouts
    # varsForL1=AEouts[0:-1]
    # varsForL1=AEouts[1:]

    # for line in sess.graph.get_operations():
    #     if 'GEN_L003' in line.name:
    #         Gen3_ops.append(line)

    #     # LL=QQQ.outputs[0]

    # for x in Gen3_ops: print(x.name +'           ' + str(x.outputs[0].shape))

    print("Vars for l2 loss:")
    varws = [
        v for v in tf.global_variables()
        if (("weight" in v.name) or ("ConvNet" in v.name))
    ]
    varsForL2 = [v for v in varws if "C2D" in v.name]
    varsForL2 = [v for v in varws if "disc" not in v.name]
    varsForL2 = [v for v in varws if "bias" not in v.name]
    for line in varsForL2:
        print(line.name + '           ' + str(line.shape.as_list()))

    print("Vars for Phase-only loss:")
    varws = [v for v in tf.global_variables() if "weight" in v.name]
    varsForPhaseOnly = [v for v in varws if "SharedOverFeat" in v.name]
    for line in varsForPhaseOnly:
        print(line.name + '           ' + str(line.shape.as_list()))

    # pdb.set_trace()

    gene_loss, MoreOut, MoreOut2, MoreOut3 = srez_modelBase.create_generator_loss(
        disc_fake_output, gene_output, train_features, train_labels, varsForL1,
        varsForL2, varsForPhaseOnly)
    disc_real_loss, disc_fake_loss = \
                     srez_modelBase.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_modelBase.create_optimizers(gene_loss, gene_var_list, disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())

    #pdb.set_trace()
    # ggg: to restore session
    RestoreSession = False
    if RestoreSession:
        saver = tf.train.Saver()
        filename = 'checkpoint_new'
        filename = os.path.join(myParams.myDict['checkpoint_dir'], filename)
        saver.restore(sess, filename)

    srez_train.train_model(train_data)
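Note: The two Signal Bank branches above create identical non-trainable `Bank`/`LBank` variables and differ only in the bank size. A hypothetical helper (not part of the original code) that collapses the duplication; the names, shapes, and the "aaa" scope are taken from the example:

import tensorflow as tf

def make_signal_bank(bank_size, data_h, labels_h, labels_w, scope="aaa"):
    # Non-trainable buffers holding cached input signals and their labels.
    with tf.variable_scope(scope):
        bank = tf.get_variable("Bank", shape=[bank_size, data_h, 1, 1],
                               dtype=tf.float32, trainable=False)
        lbank = tf.get_variable("LBank", shape=[bank_size, labels_h, labels_w, 2],
                                dtype=tf.float32, trainable=False)
    return bank, lbank

With this, the branch reduces to calling the helper with `BankSize * 2` for the 'RegridTry3FMB' input mode and `BankSize` otherwise.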
Example #9
def _train():
    # Setup global tensorflow state
    sess, _oldwriter = setup_tensorflow()

    # image_size
    if FLAGS.sample_size_y > 0:
        image_size = [FLAGS.sample_size, FLAGS.sample_size_y]
    else:
        image_size = [FLAGS.sample_size, FLAGS.sample_size]

    # Prepare train and test directories (SEPARATE FOLDER)
    prepare_dirs(delete_train_dir=True, shuffle_filename=False)
    filenames_input_train = get_filenames(dir_file=FLAGS.dataset_train,
                                          shuffle_filename=True)
    filenames_output_train = get_filenames(dir_file=FLAGS.dataset_train,
                                           shuffle_filename=True)
    filenames_input_test = get_filenames(dir_file=FLAGS.dataset_test,
                                         shuffle_filename=False)
    filenames_output_test = get_filenames(dir_file=FLAGS.dataset_test,
                                          shuffle_filename=False)

    ## Prepare directories (SAME FOLDER)
    #prepare_dirs(delete_train_dir=True, shuffle_filename=False)
    #filenames_input = get_filenames(dir_file=FLAGS.dataset_input, shuffle_filename=False)
    ## if not specify use the same as input
    #if FLAGS.dataset_output == '':
    #FLAGS.dataset_output = FLAGS.dataset_input
    #filenames_output = get_filenames(dir_file=FLAGS.dataset_output, shuffle_filename=False)

    # check that input and output sample counts match (SEPARATE FOLDERS)
    assert (len(filenames_input_train) == len(filenames_output_train))
    num_filename_train = len(filenames_input_train)
    assert (len(filenames_input_test) == len(filenames_output_test))
    num_filename_test = len(filenames_input_test)

    #print(num_filename_train)
    #print(num_filename_test)
    #print(filenames_output_test)

    # check input and output sample number matches (SAME FOLDER)
    #assert(len(filenames_input)==len(filenames_output))
    #num_filename_all = len(filenames_input)

    # Permutate train and test split (SEPARATE FOLDERS)
    if FLAGS.permutation_split:
        index_permutation_split = random.sample(range(num_filename_train),
                                                num_filename_train)
        filenames_input_train = [
            filenames_input_train[x] for x in index_permutation_split
        ]
        filenames_output_train = [
            filenames_output_train[x] for x in index_permutation_split
        ]
        #print(np.shape(filenames_input_train))

    if FLAGS.permutation_split:
        index_permutation_split = random.sample(range(num_filename_test),
                                                num_filename_test)
        filenames_input_test = [
            filenames_input_test[x] for x in index_permutation_split
        ]
        filenames_output_test = [
            filenames_output_test[x] for x in index_permutation_split
        ]
    #print('filenames_input[:20]',filenames_input[:20])

    # Permutate test split (SAME FOLDERS)
    #if FLAGS.permutation_split:
    #index_permutation_split = random.sample(num_filename_test, num_filename_test)
    #filenames_input_test = [filenames_input_test[x] for x in index_permutation_split]
    #filenames_output_test = [filenames_output_test[x] for x in index_permutation_split]
    #print('filenames_input[:20]',filenames_input[:20])

    # Separate training and test sets (SEPARATE FOLDERS)
    train_filenames_input = filenames_input_train[:FLAGS.sample_train]
    train_filenames_output = filenames_output_train[:FLAGS.sample_train]

    test_filenames_input = filenames_input_test[:FLAGS.sample_test]
    test_filenames_output = filenames_output_test[:FLAGS.sample_test]
    #print('test_filenames_input', test_filenames_input)
    #print('train_filenames_input', train_filenames_input)

    # Separate training and test sets (SAME FOLDERS)
    #train_filenames_input = filenames_input[:-FLAGS.sample_test]
    #train_filenames_output = filenames_output[:-FLAGS.sample_test]
    #test_filenames_input  = filenames_input[-FLAGS.sample_test:]
    #test_filenames_output  = filenames_output[-FLAGS.sample_test:]
    #print('test_filenames_input[:20]',test_filenames_input[:20])

    # randomly subsample for train
    if FLAGS.subsample_train > 0:

        index_sample_train_selected = random.sample(
            range(len(train_filenames_input)), FLAGS.subsample_train)
        if not FLAGS.permutation_train:
            index_sample_train_selected = sorted(index_sample_train_selected)
        train_filenames_input = [
            train_filenames_input[x] for x in index_sample_train_selected
        ]
        train_filenames_output = [
            train_filenames_output[x] for x in index_sample_train_selected
        ]
        print('randomly sampled {0} from {1} train samples'.format(
            len(train_filenames_input),
            len(filenames_input_train[:-FLAGS.sample_test])))

    # randomly sub-sample for test
    if FLAGS.subsample_test > 0:
        index_sample_test_selected = random.sample(
            range(len(test_filenames_input)), FLAGS.subsample_test)
        print(len(test_filenames_input))
        print(FLAGS.subsample_test)
        if not FLAGS.permutation_test:
            index_sample_test_selected = sorted(index_sample_test_selected)
        test_filenames_input = [
            test_filenames_input[x] for x in index_sample_test_selected
        ]
        test_filenames_output = [
            test_filenames_output[x] for x in index_sample_test_selected
        ]
        print('randomly sampled {0} from {1} test samples'.format(
            len(test_filenames_input),
            len(test_filenames_input[:-FLAGS.sample_test])))

    #print('test_filenames_input',test_filenames_input)

    # get undersample mask
    from scipy import io as sio
    try:
        content_mask = sio.loadmat(FLAGS.sampling_pattern)
        key_mask = [x for x in content_mask.keys() if not x.startswith('_')]
        mask = content_mask[key_mask[0]]
    except:
        mask = None

    print(len(train_filenames_input))
    print(len(train_filenames_output))
    print(len(test_filenames_input))
    print(len(test_filenames_output))

    # Setup async input queues
    train_features, train_labels, train_masks = srez_input.setup_inputs_one_sources(
        sess,
        train_filenames_input,
        train_filenames_output,
        image_size=image_size,
        # undersampling
        axis_undersample=FLAGS.axis_undersample,
        r_factor=FLAGS.R_factor,
        r_alpha=FLAGS.R_alpha,
        r_seed=FLAGS.R_seed,
        sampling_mask=mask)
    test_features, test_labels, test_masks = srez_input.setup_inputs_one_sources(
        sess,
        test_filenames_input,
        test_filenames_output,
        image_size=image_size,
        # undersampling
        axis_undersample=FLAGS.axis_undersample,
        r_factor=FLAGS.R_factor,
        r_alpha=FLAGS.R_alpha,
        r_seed=FLAGS.R_seed,
        sampling_mask=mask)

    print('features_size', train_features.get_shape())
    print('labels_size', train_labels.get_shape())
    print('masks_size', train_masks.get_shape())

    # sample train and test
    num_sample_train = len(train_filenames_input)
    num_sample_test = len(test_filenames_input)
    print('train on {0} samples and test on {1} samples'.format(
        num_sample_train, num_sample_test))

    # Add some noise during training (think denoising autoencoders)
    noise_level = .00
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, label_minput, gene_moutput, gene_moutput_list, \
     gene_output, gene_output_list, gene_var_list, gene_layers_list, gene_mlayers_list, gene_mask_list, gene_mask_list_0, \
     disc_real_output, disc_fake_output, disc_var_list, train_phase,disc_layers, eta, nmse, kappa] = \
            srez_model.create_model(sess, noisy_train_features, train_labels, train_masks, architecture=FLAGS.architecture)

    #train_phase = tf.placeholder(tf.bool, [])

    gene_loss, gene_dc_loss, gene_ls_loss, gene_mse_loss, list_gene_losses, gene_mse_factor = srez_model.create_generator_loss(
        disc_fake_output, gene_output, gene_output_list, train_features,
        train_labels, train_masks)
    disc_real_loss, disc_fake_loss = \
                     srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_model.create_optimizers(gene_loss, gene_var_list,
                                         disc_loss, disc_var_list)

    # tensorboard
    summary_op = tf.summary.merge_all()

    #restore variables from checkpoint
    filename = 'checkpoint_new.txt'
    filename = os.path.join(FLAGS.checkpoint_dir, filename)
    metafile = filename + '.meta'
    """
    if tf.gfile.Exists(metafile):
        saver = tf.train.Saver()
        print("Loading checkpoint from file `%s'" % (filename,))
        saver.restore(sess, filename)
    else:
        print("No checkpoint `%s', train from scratch" % (filename,))
        sess.run(tf.global_variables_initializer())
"""

    print("No checkpoint `%s', train from scratch" % (filename, ))
    print(
        np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        ]))
    sess.run(tf.global_variables_initializer())

    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(sess, train_data, num_sample_train, num_sample_test)
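Note: Example #9's `permutation_split` branches were adjusted above to sample from `range(n)`, since `random.sample` expects a sequence rather than an integer population. A small sketch of the corrected pattern as a reusable helper that keeps paired input/output filename lists aligned (the helper name is hypothetical):

import random

def permute_pair(inputs, outputs):
    # Apply one random permutation to both lists so input/output pairs stay aligned.
    assert len(inputs) == len(outputs)
    order = random.sample(range(len(inputs)), len(inputs))
    return [inputs[i] for i in order], [outputs[i] for i in order]

# Usage sketch:
# filenames_input_train, filenames_output_train = permute_pair(
#     filenames_input_train, filenames_output_train)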
Example #10
def _train():

    # LoadAndRunOnData=False
    LoadAndRunOnData = myParams.myDict['LoadAndRunOnData'] > 0
    if LoadAndRunOnData:
        # Setup global tensorflow state
        sess, summary_writer = setup_tensorflow()

        # Prepare directories
        filenames = prepare_dirs(delete_train_dir=False)

        # Setup async input queues
        features, labels = srez_input.setup_inputs(sess, filenames)

        # Create and initialize model
        [gene_minput, gene_moutput,
         gene_output, gene_var_list,
         disc_real_output, disc_fake_output, disc_var_list] = \
                srez_modelBase.create_model(sess, features, labels)

        # Restore variables from checkpoint
        print("Adding to saver:")
        var_listX = gene_var_list
        var_listX = [v for v in var_listX if "Bank" not in v.name]
        for line in var_listX:
            print("Adding " + line.name + '           ' +
                  str(line.shape.as_list()))
        print("Saver var list end")

        saver = tf.train.Saver(var_listX)
        # saver = tf.train.Saver()
        filename = 'checkpoint_new'
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS2_dataNeighborhoodRCB0__2018-06-08_16-17-56_checkpoint', filename)
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS2_dataNeighborhoodRCB0__2018-06-09_19-44-17_checkpoint', filename)
        # filename = os.path.join('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RegridTry1C2_TS__2018-06-29_10-39-13_checkpoint', filename)
        checkpointP = myParams.myDict['LoadAndRunOnData_checkpointP']
        filename = os.path.join(checkpointP, filename)

        saver.restore(sess, filename)

        if myParams.myDict['Mode'] in ('RegridTry1', 'RegridTry1C', 'RegridTry1C2',
                                       'RegridTry1C2_TS', 'RegridTry1C2_TS2'):
            FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
            NMapCR = FullData['NMapCR']

        for r in range(1, myParams.myDict['HowManyToRun']):
            # ifilename='/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/srez/RealData/b_Ben14May_Sli5_r' +  f'{r:02}' + '.mat'
            # ifilename='/media/a/DATA/14May18/Ben/meas_MID109_gBP_VD11_U19_4min_FID17944/RealData/Sli11_r' +  f'{r:02}' + '.mat'
            ifilenamePrefix = myParams.myDict['LoadAndRunOnData_Prefix']
            # ifilename=ifilenamePrefix +  f'{r:02}' + '.mat'
            ifilename = ifilenamePrefix + ('%02d' % r) + '.mat'
            # ifilename=ifilenamePrefix + r + '.mat'
            RealData = scipy.io.loadmat(ifilename)
            RealData = RealData['Data']

            if RealData.ndim == 2:
                RealData = RealData.reshape(
                    (RealData.shape[0], RealData.shape[1], 1, 1))
            if RealData.ndim == 3:
                RealData = RealData.reshape(
                    (RealData.shape[0], RealData.shape[1], RealData.shape[2],
                     1))

            Real_feature = RealData

            # if myParams.myDict['Mode'] == 'RegridTry1' or myParams.myDict['Mode'] == 'RegridTry1C' or myParams.myDict['Mode'] == 'RegridTry1C2' or myParams.myDict['Mode'] == 'RegridTry1C2_TS' or myParams.myDict['Mode'] == 'RegridTry1C2_TS2':
            #     batch_size=myParams.myDict['batch_size']

            #     Real_feature=np.reshape(RealData[0],[RealData.shape[1]])
            #     Real_feature=np.take(Real_feature,NMapCR)
            #     Real_feature=np.tile(Real_feature, (batch_size,1,1,1))

            if myParams.myDict['InputMode'] in ('RegridTry1', 'RegridTry2'):
                # FullData=scipy.io.loadmat('/media/a/f38a5baa-d293-4a00-9f21-ea97f318f647/home/a/TF/NMapIndTesta.mat')
                FullData = scipy.io.loadmat(myParams.myDict['NMAP_FN'])
                NMapCR = FullData['NMapCR']

                batch_size = myParams.myDict['batch_size']

                Real_feature = np.reshape(RealData[0], [RealData.shape[1]])
                Real_feature = np.take(Real_feature, NMapCR)
                Real_feature = np.tile(Real_feature, (batch_size, 1, 1, 1))

            Real_dictOut = {gene_minput: Real_feature}

            gene_RealOutput = sess.run(gene_moutput, feed_dict=Real_dictOut)

            OnRealData = {}
            OnRealDataM = gene_RealOutput
            # filenamex = 'OnRealData' + f'{r:02}' + '.mat'
            filenamex = 'OnRealData' + ('%02d' % r) + '.mat'
            # filenamex = 'OnRealData' + r + '.mat'

            LoadAndRunOnData_OutP = myParams.myDict['LoadAndRunOnData_OutP']
            filename = os.path.join(LoadAndRunOnData_OutP, filenamex)
            OnRealData['x'] = OnRealDataM
            scipy.io.savemat(filename, OnRealData)

        print('Saved recon of real data')
        exit()

    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # Prepare directories
    all_filenames = prepare_dirs(delete_train_dir=True)

    # Separate training and test sets
    #train_filenames = all_filenames[:-FLAGS.test_vectors]
    train_filenames = all_filenames
    #test_filenames  = all_filenames[-FLAGS.test_vectors:]

    # TBD: Maybe download dataset here

    #pdb.set_trace()

    # ggg Signal Bank stuff:
    if myParams.myDict['BankSize'] > 0:
        if myParams.myDict['InputMode'] == 'RegridTry3FMB':
            BankSize = myParams.myDict['BankSize'] * 2

            # BankInit=np.zeros([BankSize,myParams.myDict['DataH'],1,1])
            # LBankInit=np.zeros([BankSize,myParams.myDict['LabelsH'],myParams.myDict['LabelsW'], 2])
            with tf.variable_scope("aaa"):
                Bank = tf.get_variable(
                    "Bank",
                    shape=[BankSize, myParams.myDict['DataH'], 1, 1],
                    dtype=tf.float32,
                    trainable=False)
                LBank = tf.get_variable("LBank",
                                        shape=[
                                            BankSize,
                                            myParams.myDict['LabelsH'],
                                            myParams.myDict['LabelsW'], 2
                                        ],
                                        dtype=tf.float32,
                                        trainable=False)
                # LBank=tf.get_variable("LBank",initializer=tf.cast(LBankInit, tf.float32),dtype=tf.float32,trainable=False)
        else:
            BankSize = myParams.myDict['BankSize']

            BankInit = np.zeros([BankSize, myParams.myDict['DataH'], 1, 1])
            LBankInit = np.zeros([
                BankSize, myParams.myDict['LabelsH'],
                myParams.myDict['LabelsW'], 2
            ])
            with tf.variable_scope("aaa"):
                # Bank=tf.get_variable("Bank",initializer=tf.cast(BankInit, tf.float32),dtype=tf.float32)
                Bank = tf.get_variable(
                    "Bank",
                    shape=[BankSize, myParams.myDict['DataH'], 1, 1],
                    dtype=tf.float32,
                    trainable=False)
                LBank = tf.get_variable("LBank",
                                        shape=[
                                            BankSize,
                                            myParams.myDict['LabelsH'],
                                            myParams.myDict['LabelsW'], 2
                                        ],
                                        dtype=tf.float32,
                                        trainable=False)
                # LBank=tf.get_variable("LBank",initializer=tf.cast(LBankInit, tf.float32),dtype=tf.float32)

        init_new_vars_op = tf.variables_initializer([Bank, LBank])
        sess.run(init_new_vars_op)
    # ggg end Signal Bank stuff:

    # Setup async input queues
    train_features, train_labels = srez_input.setup_inputs(
        sess, train_filenames)
    # No separate test queue is built here; the train tensors are reused for testing.
    # test_features, test_labels = srez_input.setup_inputs(sess, train_filenames,TestStuff=True)
    test_features = train_features
    test_labels = train_labels
    #test_features,  test_labels  = srez_input.setup_inputs(sess, test_filenames)

    print('starting ' + time.strftime("%Y-%m-%d %H:%M:%S"))
    print('train_features %s' % (train_features))
    print('train_labels %s' % (train_labels))

    # Add some noise during training (think denoising autoencoders)
    noise_level = myParams.myDict['noise_level']
    AddNoise = noise_level > 0.0
    if AddNoise:
        noisy_train_features = train_features + tf.random_normal(
            train_features.get_shape(), stddev=noise_level)
    else:
        noisy_train_features = train_features

    # Create and initialize model
    [gene_minput, gene_moutput,
     gene_output, gene_var_list,
     disc_real_output, disc_fake_output, disc_var_list] = \
            srez_modelBase.create_model(sess, noisy_train_features, train_labels)

    # Debugging helpers: the generator variables and graph operations can be
    # inspected by name and shape via gene_var_list, tf.global_variables()
    # and sess.graph.get_operations().
    # Collect the autoencoder ("AE") ops from the training graph, skipping the
    # duplicated "_1/" copies; their outputs are passed to the generator loss as varsForL1.
    AEops = [
        v for v in sess.graph.get_operations()
        if "AE" in v.name and not ("_1/" in v.name)
    ]
    AEouts = [v.outputs[0] for v in AEops]
    varsForL1 = AEouts
    # varsForL1=AEouts[0:-1]
    # varsForL1=AEouts[1:]

    print("Vars for l2 loss:")
    varws = [v for v in tf.global_variables() if "weight" in v.name]
    varsForL2 = [v for v in varws if "C2D" in v.name]
    varsForL2 = [v for v in varws if "disc" not in v.name]
    for line in varsForL2:
        print(line.name + '           ' + str(line.shape.as_list()))

    print("Vars for Phase-only loss:")
    varws = [v for v in tf.global_variables() if "weight" in v.name]
    varsForPhaseOnly = [v for v in varws if "SharedOverFeat" in v.name]
    for line in varsForPhaseOnly:
        print(line.name + '           ' + str(line.shape.as_list()))

    # pdb.set_trace()

    gene_loss, MoreOut, MoreOut2, MoreOut3 = srez_modelBase.create_generator_loss(
        disc_fake_output, gene_output, train_features, train_labels, varsForL1,
        varsForL2, varsForPhaseOnly)
    disc_real_loss, disc_fake_loss = \
                     srez_modelBase.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_modelBase.create_optimizers(gene_loss, gene_var_list, disc_loss, disc_var_list)

    # Train model
    train_data = TrainData(locals())

    #pdb.set_trace()
    # ggg: to restore session
    RestoreSession = False
    if RestoreSession:
        saver = tf.train.Saver()
        filename = 'checkpoint_new'
        filename = os.path.join(myParams.myDict['checkpoint_dir'], filename)
        saver.restore(sess, filename)

    srez_train.train_model(train_data)
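
A minimal standalone sketch of the non-trainable signal-bank pattern used above: create the
bank variables inside their own scope, then initialize only those variables. This assumes
TensorFlow 1.x, as in these examples; the sizes, scope name, and update call are hypothetical.

import numpy as np
import tensorflow as tf

BANK_SIZE, DATA_H = 8, 128            # hypothetical dimensions
LABELS_H, LABELS_W = 64, 64

with tf.variable_scope("bank_demo"):
    Bank = tf.get_variable("Bank", shape=[BANK_SIZE, DATA_H, 1, 1],
                           dtype=tf.float32, trainable=False)
    LBank = tf.get_variable("LBank", shape=[BANK_SIZE, LABELS_H, LABELS_W, 2],
                            dtype=tf.float32, trainable=False)

with tf.Session() as sess:
    # Initialize only the bank variables, leaving any model variables untouched.
    sess.run(tf.variables_initializer([Bank, LBank]))
    # Cache one example in slot 0 of the bank.
    new_row = np.zeros([1, DATA_H, 1, 1], dtype=np.float32)
    sess.run(tf.scatter_update(Bank, [0], new_row))
    print(sess.run(tf.shape(Bank)))   # -> [8, 128, 1, 1]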
Exemplo n.º 11
0
def _train():
    time_start = time.strftime("%Y-%m-%d-%H-%M-%S")
    print("START. Time is {}".format(time_start))

    # Setup global tensorflow state
    sess, summary_writer = setup_tensorflow()

    # image_size
    if FLAGS.sample_size_y > 0:
        image_size = [FLAGS.sample_size, FLAGS.sample_size_y]
    else:
        image_size = [FLAGS.sample_size, FLAGS.sample_size]

    # Prepare train and test directories (SEPARATE FOLDER)
    prepare_dirs(delete_train_dir=True, shuffle_filename=False)
    if FLAGS.cv_index >= 0:
        # Cross-validation: train on every group except FLAGS.cv_index, which is held out for testing
        filenames_input_train = []
        filenames_output_train = []
        for i in range(FLAGS.cv_groups):
            if i == FLAGS.cv_index:
                continue
            train_dir = os.path.join(FLAGS.dataset, str(i))
            filenames = get_filenames(dir_file=train_dir,
                                      shuffle_filename=True)
            filenames_input_train.extend(filenames)
            filenames_output_train.extend(filenames)
        test_dir = os.path.join(FLAGS.dataset, str(FLAGS.cv_index))
        filenames_input_test = get_filenames(dir_file=test_dir,
                                             shuffle_filename=True)
        filenames_output_test = get_filenames(dir_file=test_dir,
                                              shuffle_filename=True)
    else:
        filenames_input_train = get_filenames(dir_file=FLAGS.dataset_train,
                                              shuffle_filename=True)
        filenames_output_train = get_filenames(dir_file=FLAGS.dataset_train,
                                               shuffle_filename=True)
        filenames_input_test = get_filenames(dir_file=FLAGS.dataset_test,
                                             shuffle_filename=False)
        filenames_output_test = get_filenames(dir_file=FLAGS.dataset_test,
                                              shuffle_filename=False)

    # Record parameters
    parameters = save_parameters(time_start=time_start)

    ## Prepare directories (SAME FOLDER)
    #prepare_dirs(delete_train_dir=True, shuffle_filename=False)
    #filenames_input = get_filenames(dir_file=FLAGS.dataset_input, shuffle_filename=False)
    ## if not specify use the same as input
    #if FLAGS.dataset_output == '':
    #FLAGS.dataset_output = FLAGS.dataset_input
    #filenames_output = get_filenames(dir_file=FLAGS.dataset_output, shuffle_filename=False)

    # check input and output sample number matches (SEPARATE FOLDER)
    assert (len(filenames_input_train) == len(filenames_output_train))
    num_filename_train = len(filenames_input_train)
    assert (len(filenames_input_test) == len(filenames_output_test))
    num_filename_test = len(filenames_input_test)

    print('num train files: {0}'.format(num_filename_train))
    print('num test files: {0}'.format(num_filename_test))

    # check input and output sample number matches (SAME FOLDER)
    #assert(len(filenames_input)==len(filenames_output))
    #num_filename_all = len(filenames_input)

    # Permutate train and test split (SEPARATE FOLDERS)
    if FLAGS.permutation_split:
        # random.sample needs a population sequence, so permute indices drawn
        # from range(num_filename_train).
        index_permutation_split = random.sample(range(num_filename_train),
                                                num_filename_train)
        filenames_input_train = [
            filenames_input_train[x] for x in index_permutation_split
        ]
        filenames_output_train = [
            filenames_output_train[x] for x in index_permutation_split
        ]
        #print(np.shape(filenames_input_train))

    if FLAGS.permutation_split:
        index_permutation_split = random.sample(range(num_filename_test),
                                                num_filename_test)
        filenames_input_test = [
            filenames_input_test[x] for x in index_permutation_split
        ]
        filenames_output_test = [
            filenames_output_test[x] for x in index_permutation_split
        ]
    #print('filenames_input[:20]',filenames_input[:20])

    # Permutate test split (SAME FOLDERS)
    #if FLAGS.permutation_split:
    #index_permutation_split = random.sample(num_filename_test, num_filename_test)
    #filenames_input_test = [filenames_input_test[x] for x in index_permutation_split]
    #filenames_output_test = [filenames_output_test[x] for x in index_permutation_split]
    #print('filenames_input[:20]',filenames_input[:20])

    # Separate training and test sets (SEPARATE FOLDERS)
    sample_train = (len(filenames_input_train)
                    if FLAGS.sample_train <= 0 else FLAGS.sample_train)
    sample_test = (len(filenames_input_test)
                   if FLAGS.sample_test <= 0 else FLAGS.sample_test)

    train_filenames_input = filenames_input_train[:sample_train]
    train_filenames_output = filenames_output_train[:sample_train]

    # TODO For separate train/test folders the index should be `[:sample_test]`;
    # `[-sample_test:]` is kept as a workaround for the same-folder split.
    test_filenames_input = filenames_input_test[
        -sample_test:]  # filenames_input_test[:sample_test]
    test_filenames_output = filenames_output_test[
        -sample_test:]  # filenames_output_test[:sample_test]
    #print('test_filenames_input', test_filenames_input)
    #print('train_filenames_input', train_filenames_input)

    # Separate training and test sets (SAME FOLDERS)
    #train_filenames_input = filenames_input[:-FLAGS.sample_test]
    #train_filenames_output = filenames_output[:-FLAGS.sample_test]
    #test_filenames_input  = filenames_input[-FLAGS.sample_test:]
    #test_filenames_output  = filenames_output[-FLAGS.sample_test:]
    #print('test_filenames_input[:20]',test_filenames_input[:20])

    # randomly subsample for train
    if FLAGS.subsample_train > 0:
        num_train_before_subsample = len(train_filenames_input)
        index_sample_train_selected = random.sample(
            range(len(train_filenames_input)), FLAGS.subsample_train)
        if not FLAGS.permutation_train:
            index_sample_train_selected = sorted(index_sample_train_selected)
        train_filenames_input = [
            train_filenames_input[x] for x in index_sample_train_selected
        ]
        train_filenames_output = [
            train_filenames_output[x] for x in index_sample_train_selected
        ]
        print('randomly sampled {0} from {1} train samples'.format(
            len(train_filenames_input), num_train_before_subsample))

    # randomly sub-sample for test
    if FLAGS.subsample_test > 0:
        index_sample_test_selected = random.sample(
            range(len(test_filenames_input)), FLAGS.subsample_test)
        if not FLAGS.permutation_test:
            index_sample_test_selected = sorted(index_sample_test_selected)
        test_filenames_input = [
            test_filenames_input[x] for x in index_sample_test_selected
        ]
        test_filenames_output = [
            test_filenames_output[x] for x in index_sample_test_selected
        ]
        #print('randomly sampled {0} from {1} test samples'.format(len(test_filenames_input), len(filenames_input[:-FLAGS.sample_test])))

    #print('test_filenames_input',test_filenames_input)

    # get undersample mask
    from scipy import io as sio
    try:
        content_mask = sio.loadmat(FLAGS.sampling_pattern)
        # take the first non-metadata key of the .mat file as the mask
        key_mask = [x for x in content_mask.keys() if not x.startswith('_')]
        mask = content_mask[key_mask[0]]
    except Exception:
        # no usable precomputed mask; sampling_mask=None is passed downstream
        mask = None

    # Setup async input queues
    train_features, train_labels, train_masks = srez_input.setup_inputs_one_sources(
        sess,
        train_filenames_input,
        train_filenames_output,
        image_size=image_size,
        # undersampling
        axis_undersample=FLAGS.axis_undersample,
        r_factor=FLAGS.R_factor,
        r_alpha=FLAGS.R_alpha,
        r_seed=FLAGS.R_seed,
        sampling_mask=mask)
    test_features, test_labels, test_masks = srez_input.setup_inputs_one_sources(
        sess,
        test_filenames_input,
        test_filenames_output,
        image_size=image_size,
        # undersampling
        axis_undersample=FLAGS.axis_undersample,
        r_factor=FLAGS.R_factor,
        r_alpha=FLAGS.R_alpha,
        r_seed=FLAGS.R_seed,
        sampling_mask=mask)

    print('train_features_queue', train_features.get_shape())
    print('train_labels_queue', train_labels.get_shape())
    print('train_masks_queue', train_masks.get_shape())

    #train_masks = tf.cast(sess.run(train_masks), tf.float32)
    #test_masks = tf.cast(sess.run(test_masks), tf.float32)

    # sample train and test
    num_sample_train = len(train_filenames_input)
    num_sample_test = len(test_filenames_input)
    print('train on {0} samples and test on {1} samples'.format(
        num_sample_train, num_sample_test))

    # Add some noise during training (think denoising autoencoders)
    noise_level = .00
    noisy_train_features = train_features + \
                           tf.random_normal(train_features.get_shape(), stddev=noise_level)

    # Create and initialize model
    [gene_minput, gene_moutput, gene_moutput_complex, \
     gene_output, gene_output_complex, gene_var_list, gene_layers, gene_mlayers, \
     disc_real_output, disc_fake_output, disc_moutput, disc_var_list, disc_layers, disc_mlayers] = \
            srez_model.create_model(sess, noisy_train_features, train_labels, train_masks, architecture=FLAGS.architecture)

    gene_loss, gene_dc_loss, gene_ls_loss, list_gene_losses, gene_mse_factor = srez_model.create_generator_loss(
        disc_fake_output, gene_output, gene_output_complex, train_features,
        train_labels, train_masks)
    disc_real_loss, disc_fake_loss = \
                     srez_model.create_discriminator_loss(disc_real_output, disc_fake_output)
    disc_loss = tf.add(disc_real_loss, disc_fake_loss, name='disc_loss')

    # add gradient on disc loss
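    # (mainly a shape sanity check: the gradients are built here and their shapes printed below)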
    disc_gradients = tf.gradients(
        disc_loss, [disc_fake_output, disc_real_output, gene_output])
    print('disc loss gradients:', [x.shape for x in disc_gradients])

    (global_step, learning_rate, gene_minimize, disc_minimize) = \
            srez_model.create_optimizers(gene_loss, gene_var_list,
                                         disc_loss, disc_var_list)

    # Re-create the summary writer so the session graph is written to FLAGS.train_dir
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    # Train model
    train_data = TrainData(locals())
    srez_train.train_model(train_data, num_sample_train, num_sample_test)

    time_ended = time.strftime("%Y-%m-%d-%H-%M-%S")
    print("ENDED. Time is {}".format(time_ended))

    # Overwrite log file now that we are complete
    save_parameters(use_flags=False,
                    existing=parameters,
                    time_ended=time_ended)
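
A minimal sketch of the sampling-mask loading step above: read a .mat file with
scipy.io.loadmat and take the first non-metadata key (keys not starting with '_')
as the undersampling mask; on failure return None, which the caller then passes
as sampling_mask=None to the input queues. The function and file name below are
hypothetical.

import numpy as np
from scipy import io as sio

def load_sampling_mask(path):
    """Return the mask array stored in a .mat file, or None if it cannot be read."""
    try:
        content = sio.loadmat(path)
    except (IOError, ValueError):
        return None
    keys = [k for k in content.keys() if not k.startswith('_')]
    if not keys:
        return None
    return np.asarray(content[keys[0]])

mask = load_sampling_mask('sampling_pattern_R4.mat')  # hypothetical file name
print('mask loaded' if mask is not None else 'no precomputed mask')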