Example #1
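This snippet starts inside a per-image loop, so its setup is not shown. A guess at the context it assumes, reconstructed from how the names are used rather than taken from the source:

import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Assumed context for the loop body below:
#   img_name            - path of the current image file
#   extracted_time      - datetime parsed from the current image
#   date_list_converted - the patient's label dates as datetimes
#   patient_series      - the label rows selected for that patient
#   original_labels     - full labels DataFrame, with 'Time_Mismatch'
#                         pre-filled with a large sentinel (assumed 500 days)
#   relevant_indices, counts - running bookkeeping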
            differences = [abs(extracted_time - each_date) for each_date in date_list_converted]
            minimum = min(differences)
            closest_date_index = differences.index(minimum)
            # Find index of relevant entry
            relevant_index = patient_series.index[closest_date_index]
            relevant_indices.append(relevant_index)
            # Update the dataframe only if this mismatch is smaller; otherwise
            # we might overwrite a better match with a worse image
            if minimum.days < original_labels.at[relevant_index, 'Time_Mismatch']:
                original_labels.at[relevant_index, 'Filename'] = os.path.basename(img_name)
                original_labels.at[relevant_index, 'Time_Mismatch'] = minimum.days
            # else:
            #     original_labels.at[relevant_index, 'Filename'] = img_name
            #     original_labels.at[relevant_index, 'Time_Mismatch'] = minimum.days
            print(relevant_index)
            counts += 1

# Plot histogram of time mismatches
original_labels['Time_Mismatch'] = original_labels['Time_Mismatch'].astype(float)
original_labels['Time_Mismatch'].plot.hist(bins=40)
plt.show()

# Count entries matched within 0 or 1 days
mismatch_counts = original_labels.Time_Mismatch.value_counts()
viable_entries = mismatch_counts.get(0.0, 0) + mismatch_counts.get(1.0, 0)
print(f'Number of viable entries is {viable_entries}')

# Make invalid mismatches nulls (500 is assumed to be the sentinel the
# 'Time_Mismatch' column was initialised with)
original_labels.loc[original_labels.Time_Mismatch == 500, 'Time_Mismatch'] = np.nan
original_labels.loc[original_labels.Filename == "", 'Filename'] = np.nan
original_labels.to_csv(os.path.join('/data/COVID/Labels', 'new_gstt.csv'), index=False)
# A better approach would find the combination of dates that minimises the
# overall difference; see the sketch below
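A sketch of that better approach, treating the matching as an assignment problem; image_dates and label_dates are hypothetical inputs, and scipy's linear_sum_assignment does the global minimisation:

# Sketch only: globally optimal image-to-label date matching
import numpy as np
from scipy.optimize import linear_sum_assignment

def match_dates(image_dates, label_dates):
    # cost[i, j] = whole-day gap between image i and label entry j
    cost = np.array([[abs(img - lab).days for lab in label_dates]
                     for img in image_dates])
    img_idx, lab_idx = linear_sum_assignment(cost)  # minimises the total mismatch
    return list(zip(img_idx, lab_idx, cost[img_idx, lab_idx]))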
Example #2
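The function below leans on names defined elsewhere. A guess at that scaffolding, reconstructed from usage (the placeholder shapes, batch_size, and n are assumptions):

import time
import datetime

import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
import tensorflow as tf  # TF1-style graph API

batch_size = 16   # assumed
n = 1600          # assumed number of training images

# Assumed placeholders; the real shapes depend on the dataset
blur = tf.placeholder(tf.float32, [None, None, None, 3], name='blur')
real = tf.placeholder(tf.float32, [None, None, None, 3], name='real')

# Also assumed:
#   generator(blur)  -> (generated_image, generator_var_list)
#   discriminator(x) -> (logits, discriminator_var_list), reusing variables
#                       between the real and fake calls
#   create_batch(i)  -> (batch_real, batch_blur): lists of HxWx3 images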
def train():
    """
    function that trains the generator and discriminator
    loss function based on (Ledig 2017)
    optimzer - adam
    """
    gz, gvl = generator(blur)           # generated image + generator variables
    r_out, dvl = discriminator(real)    # logits on real images + discriminator variables
    f_out, dvl = discriminator(gz)      # same variable list (assumes variable reuse)
    
    with tf.name_scope("cost"):
        fake_dloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(f_out), logits=f_out))
        real_dloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(r_out), logits=r_out))

        tf.summary.scalar("fake_dloss", fake_dloss)
        tf.summary.scalar("real_dloss", real_dloss)
        
        dloss = fake_dloss + real_dloss
        
        # Generator loss per Ledig et al. 2017 (SRGAN): pixel-wise MSE
        # content loss plus a 1e-3-weighted adversarial term
        gloss = (tf.reduce_mean(tf.math.square(real - gz))
                 + 1e-3 * tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                     labels=tf.ones_like(f_out), logits=f_out)))
        tf.summary.scalar("gloss", gloss)
    
    with tf.name_scope("optimizer"):
        dis_optimizer = tf.train.AdamOptimizer(learning_rate=0.001, name='doptimizer')
        gen_optimizer = tf.train.AdamOptimizer(learning_rate=0.001, name='goptimizer')

        dgrads = dis_optimizer.compute_gradients(dloss, var_list=dvl)
        ggrads = gen_optimizer.compute_gradients(gloss, var_list=gvl)
        
        # Plot the gradients in TensorBoard
        for g in dgrads:
            tf.summary.histogram("{} grad".format(g[1].name), g[0])
        for g in ggrads:
            tf.summary.histogram("{} grad".format(g[1].name), g[0])

        dis_opt = dis_optimizer.apply_gradients(dgrads)
        gen_opt = gen_optimizer.apply_gradients(ggrads)

    
    merged = tf.summary.merge_all()
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=5, keep_checkpoint_every_n_hours=1)

    nepochs = 30
    

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        
        try:
            # Restore a previously saved model if a checkpoint exists
            saver = tf.train.import_meta_graph('/tmp/model.ckpt.meta')
            saver.restore(sess, '/tmp/model.ckpt')
        except Exception:
            print("no saved model")

        writer = tf.summary.FileWriter('logs', graph=sess.graph)
        
        for _ in range(nepochs):
            
            i = 1
            prev_sum = 0    # cumulative batch time, feeds the ETA estimate

            while i < (n // batch_size):
                
                start = time.time()
                
                print("batch: ",i)

                batch_real, batch_blur = create_batch(i)
                
                _, dc = sess.run([dis_opt, dloss],
                                 feed_dict={blur: np.array(batch_blur), real: np.array(batch_real)})
                _, gc, summary, gout = sess.run([gen_opt, gloss, merged, gz],
                                                feed_dict={blur: np.array(batch_blur), real: np.array(batch_real)})
                

                # NOTE: i resets every epoch, so later epochs overwrite these
                # summaries/checkpoints; a global step counter would keep them distinct
                writer.add_summary(summary, i)
                saver.save(sess, '/tmp/model.ckpt', global_step=i)

                if i % 10 == 0:
                    j = 0
                    # Show blurred input, generator output, and ground truth side by side
                    for x, y in zip(batch_blur, batch_real):
                        fig, axs = plt.subplots(1, 3, figsize=(15, 15))
                        axs[0].imshow(cv.cvtColor(x, cv.COLOR_BGR2RGB))
                        axs[1].imshow(cv.cvtColor(gout[j, :, :, :], cv.COLOR_BGR2RGB))
                        axs[2].imshow(cv.cvtColor(y, cv.COLOR_BGR2RGB))
                        j += 1
                        plt.show()
                
                end = time.time()
                prev_sum += (end - start)
                print('\n')
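                # ETA = average time per batch so far * batches left in this epoch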
                print("Eta: ",str(datetime.timedelta(seconds =(prev_sum/i)*((n//batch_size)-i))))
                
                i+=1
                   
                print("discriminator cost: ",dc)
                print("generator cost: ",gc)
                print('\n')
        writer.close()
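
A minimal way to run it, assuming the scaffolding sketched above is in scope:

if __name__ == '__main__':
    train()   # builds the graph, restores any checkpoint, trains for 30 epochs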