Code Example #1
def generator_fn(input_images, is_training=True):
    # Two chained pix2pix generators; each stage's output is masked to the
    # 64x64 circular region before being passed on.
    with tf.variable_scope('G1'):
        generated_input = pix2pix_G(input_images, is_training) * circle(64, 64)
    with tf.variable_scope('G2'):
        generated_data = pix2pix_G(generated_input, is_training) * circle(
            64, 64)

    return tf.concat((generated_data, generated_input), axis=-1)
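Several of the MMFI examples on this page multiply network outputs by circle(height, width), which acts as a circular aperture mask over the image. A minimal sketch of such a helper (an assumption for illustration; the project's actual implementation is not shown here):

import numpy as np
import tensorflow as tf

def circle(height, width):
    # Hypothetical mask: 1.0 inside the largest centered circle, 0.0 outside.
    # The trailing channel axis lets it broadcast over NHWC image batches.
    yy, xx = np.mgrid[:height, :width]
    cy, cx = (height - 1) / 2.0, (width - 1) / 2.0
    radius = min(height, width) / 2.0
    mask = ((yy - cy) ** 2 + (xx - cx) ** 2) <= radius ** 2
    return tf.constant(mask.astype(np.float32)[..., None])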
Code Example #2
File: main.py Project: manishmarahatta/anubad
    def _view_results(self, query_RESULTS):
        self.sidebar.clear()
        self.clips.clear()
        treeselection = self.sidebar.treeview.get_selection()

        end = self.viewer.textbuffer.get_end_iter()
        begin = end.get_offset()

        all_FUZZ = []
        for word, (FULL, FUZZ) in query_RESULTS.items():
            all_FUZZ += FUZZ
            if not FULL:
                self.viewer.not_found(word)
                continue

            for item in FULL:
                self.sidebar.add_suggestion(*item)
                self._view_item(*item)
                treeselection.select_path(self.sidebar.count - 1)

        end = self.viewer.textbuffer.get_end_iter()
        self.mark_CURRENT = (begin, end.get_offset())
        self.viewer.jump_to_end()

        for item in sorted(all_FUZZ, key=lambda k: k[2][1]):
            self.sidebar.add_suggestion(*item)

        print("clip:", self.clips, file=fp3)
        if len(self.clips) == 0: return
        self.clips_CYCLE = utils.circle(self.clips)
        self._circular_search(+1)
        self.search_entry.grab_focus()
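Both anubad examples (#2 and #4) hand self.clips to utils.circle and then walk the result with _circular_search(+1), so the helper is presumably a cyclic iterator. A rough sketch under that assumption (anubad's real version may also support stepping backwards):

import itertools

def circle(items):
    # Hypothetical: endlessly cycle over a snapshot of `items`, so repeated
    # next() calls wrap around past the end.
    return itertools.cycle(list(items))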
Code Example #3
File: train.py Project: qingnengli/MMFI
def Discriminator(inputs,targets):

  traindir = os.path.join(logdir, 'G2\\pix2pix_D')
  if tf.gfile.Exists(traindir):
    tf.gfile.DeleteRecursively(traindir)
  tf.gfile.MakeDirs(traindir)

  fiber_output,fiber_input = inputs
  encoder, label = targets

  with tf.variable_scope('Generator'):
    with tf.variable_scope('G2'):
      generated_data = pix2pix_G(fiber_input) * circle(FLAGS.input_size,FLAGS.input_size)

  with tf.variable_scope('Discriminator',reuse=tf.AUTO_REUSE):
      discriminator_gen_outputs = pix2pix_D(tf.concat((generated_data,fiber_input),-1))
      discriminator_real_outputs = pix2pix_D(tf.concat((label, fiber_input), -1))

  with tf.name_scope('Train_summary'):
    reshaped_label = get_summary_image(label,FLAGS.grid_size)
    reshaped_fiber_input = get_summary_image(fiber_input,FLAGS.grid_size)
    reshaped_generated_data = get_summary_image(generated_data,FLAGS.grid_size)
    tf.summary.image('Fiber_Label', reshaped_label)
    tf.summary.image('Fiber_Input', reshaped_fiber_input)
    tf.summary.image('Generated_Data', reshaped_generated_data)

  with tf.name_scope('Train_Loss'):
    predict_real = discriminator_real_outputs
    predict_fake = discriminator_gen_outputs
    discrim_real_loss = tf.reduce_mean(tf.abs(1-predict_real))
    discrim_gen_loss = tf.reduce_mean(tf.abs(-1-predict_fake))
    discrim_loss = discrim_real_loss + discrim_gen_loss
    total_loss = discrim_loss +  tf.losses.get_regularization_loss()
    total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.')
    tf.summary.scalar('Total_loss',total_loss)
    tf.summary.scalar('discrim_loss', discrim_loss)
    tf.summary.scalar('discrim_real_loss',discrim_real_loss)
    tf.summary.scalar('discrim_gen_loss',discrim_gen_loss)

  with tf.name_scope('Train_OP'):
    tf.summary.scalar('predict_real', tf.reduce_mean(predict_real))
    tf.summary.scalar('predict_fake', tf.reduce_mean(predict_fake))
    tf.summary.scalar('discrim_lr', get_lr(FLAGS.discriminator_lr,decay_steps=5000))

  train_op = slim.learning.create_train_op(total_loss,
                                           get_optimizer(get_lr(FLAGS.discriminator_lr,decay_steps=5000)),
                                           update_ops =tf.get_collection(tf.GraphKeys.UPDATE_OPS),
                                           variables_to_train=
                                           tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                                          scope='Discriminator')
                                           )

  slim.learning.train(train_op, traindir,
                      number_of_steps =FLAGS.max_iter,
                      log_every_n_steps=FLAGS.log_n_steps,
                      init_fn=get_init_fn(r'E:\GitHub\MMFI\log\G2\pix2pix_G',
                                          inclusion_scope=['Generator/G2']),
                      save_summaries_secs=FLAGS.save_summaries_secs,
                      save_interval_secs = FLAGS.save_interval_secs)
Code Example #4
File: main.py Project: manishmarahatta/anubad
    def sidebar_on_row_changed(self, treeselection):
        model, pathlist = treeselection.get_selected_rows()
        self.clips.clear()
        for path in pathlist:
            self._view_item(*self.sidebar.get_suggestion(path))

        if len(self.clips) == 0: return
        self.clips_CYCLE = utils.circle(self.clips)
        self._circular_search(+1)
Code Example #5
File: train.py Project: qingnengli/MMFI
def Generator_2(inputs,targets):

  traindir = os.path.join(logdir, 'G2\\pix2pix_G')
  if tf.gfile.Exists(traindir):
    tf.gfile.DeleteRecursively(traindir)
  tf.gfile.MakeDirs(traindir)

  fiber_output,fiber_input = inputs
  encoder, label = targets

  with tf.variable_scope('Generator'):
    with tf.variable_scope('G2'):
      generated_data = pix2pix_G(fiber_input) * circle(FLAGS.input_size,FLAGS.input_size)

  with tf.name_scope('Train_summary'):
    reshaped_fiber_input = get_summary_image(fiber_input,FLAGS.grid_size)
    reshaped_label = get_summary_image(label,FLAGS.grid_size)
    reshaped_generated_data = get_summary_image(generated_data,FLAGS.grid_size)
    tf.summary.image('Fiber_Input', reshaped_fiber_input)
    tf.summary.image('Fiber_Label', reshaped_label)
    tf.summary.image('Generated_Data', reshaped_generated_data)

  with tf.name_scope('g2_loss'):
    G2_loss = combine_loss(generated_data, label, add_summary=True)
  with tf.name_scope('Train_Loss'):
    reg_loss = tf.losses.get_regularization_loss()
    total_loss = G2_loss + reg_loss
    total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.')
    tf.summary.scalar('Regularization_loss',reg_loss)
    tf.summary.scalar('G2_loss', G2_loss)
    tf.summary.scalar('Total_loss',total_loss)

  lr = get_lr(FLAGS.generator_lr)
  optimizer = get_optimizer(lr)
  update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
  train_op = slim.learning.create_train_op(total_loss, optimizer, update_ops =update_ops,
                                           variables_to_train=
                                           tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                                             scope='Generator/G2')
                                           )

  with tf.name_scope('Train_ops'):
    psnr = tf.reduce_mean(tf.image.psnr(generated_data, label, max_val=1.0))
    ssim = tf.reduce_mean(tf.image.ssim(generated_data, label, max_val=1.0))
    corr = correlation(generated_data, label)
    tf.summary.scalar('PSNR', psnr)
    tf.summary.scalar('SSIM', ssim)
    tf.summary.scalar('Relation', corr)
    tf.summary.scalar('Learning_rate', lr)

  slim.learning.train(train_op, traindir,
                      number_of_steps =FLAGS.max_iter,
                      log_every_n_steps=FLAGS.log_n_steps,
                      # init_fn=get_init_fn('E:\GitHub\MMFI\log\\G2\\pix2pix_G'),
                      save_summaries_secs=FLAGS.save_summaries_secs,
                      save_interval_secs = FLAGS.save_interval_secs)
Code Example #6
def post_generate(resource, underneath, quantity, capital):
    resources_ = check_resources(resource, capital)
    while resources_ < quantity:
        pos_ = random.randrange(0, 8)
        territory_ = utils.circle(capital, 1, map_size)
        world_map[territory_[pos_]]['type'] = underneath
        world_map[territory_[pos_]]['above'] = resource
        for neighbour_ in utils.plus_sign(territory_[pos_], map_size):
            if world_map[neighbour_]['type'] == 'ocean':
                world_map[neighbour_]['type'] = 'water'
        resources_ = check_resources(resource, capital)
Code Example #7
File: data_loader.py Project: qingnengli/MMFI
def _fiber_input_preprocess(image):
    # Extract the effective regions
    processed_image = tf.image.resize_image_with_crop_or_pad(image, 768, 768)
    processed_image = tf.image.resize_images(
        processed_image, [FLAGS.input_size, FLAGS.input_size])

    processed_image = processed_image * circle(FLAGS.input_size,
                                               FLAGS.input_size)

    min_val, max_val = tf.reduce_min(processed_image), tf.reduce_max(processed_image)
    processed_image = (processed_image - min_val) / tf.maximum((max_val - min_val), 1)
    return processed_image
Code Example #8
File: data_loader.py Project: qingnengli/MMFI
def _label_preprocess(label):
    processed_image = tf.image.resize_images(
        label, [FLAGS.input_size, FLAGS.input_size * 4 // 3])
    processed_image = tf.image.resize_image_with_crop_or_pad(
        processed_image, FLAGS.input_size, FLAGS.input_size)
    with tf.device('/cpu:0'):
        # Rotate image(s) counterclockwise by the passed angle(s) in radians.
        processed_image = tf.contrib.image.rotate(images=processed_image,
                                                  angles=42 * 3.1415926 / 180)

    processed_image = processed_image * circle(FLAGS.input_size,
                                               FLAGS.input_size)

    min_val, max_val = tf.reduce_min(processed_image), tf.reduce_max(processed_image)
    processed_image = (processed_image - min_val) / tf.maximum((max_val - min_val), 1)
    return processed_image
Code Example #9
File: test.py Project: timjolson/halite2-bot
# for fill in list(range(0,20,3)):
#     for r in list(range(1,27,5)):
#         blank = np.zeros([100, 100])
#         circle(blank, [50,50], r, 255, fill)
#         bb = blur(blank, r*1.2+2)
#         grad = np.gradient(bb)
#         grad_mag = np.sqrt(np.power(grad[0],2) + np.power(grad[1],2))
#         plot3d(range(100), range(100), bb).show()
#         # plot3d(range(100), range(100), grad_mag).show()
#         cv2.imshow('blank',blank)
#         cv2.imshow('bb',bb)
#         cv2.imshow('g',grad_mag)
#         cv2.waitKey(0)
#         # time.sleep(100)

for fill in list(range(-1, 20, 3)):
    blank = np.zeros([800, 800])
    circle(blank, [400, 400], 380, 1, fill)
    cv2.imshow('', blank)
    cv2.waitKey(200)

for r in range(0, 50, 10):
    for x in range(-5, 170, 20):
        for y in range(-5, 90, 20):
            # for fill in [-1]:
            for fill in list(range(-1, 10, 3)):
                blank = np.zeros([71, 151])
                circle(blank, [x, y], r, 1, fill)
                cv2.imshow('', blank)
                cv2.waitKey(50)
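The circle calls above match OpenCV's cv2.circle(img, center, radius, color, thickness) signature: a non-negative thickness draws an outline of that width, while a negative value such as -1 draws a filled disc, which is what the fill sweep visualizes.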
Code Example #10
def check_resources(resource, capital):
    resources_ = 0
    for neighbour_ in utils.circle(capital, 1, map_size):
        if world_map[neighbour_]['above'] == resource:
            resources_ += 1
    return resources_
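Examples #6, #10 and #11 all treat utils.circle(center, radius, map_size) as returning the flat indices of the tiles around a cell on a square map; the pos_ in range(0, 8) lookup in example #6 suggests a radius-1 ring of eight neighbours. A minimal sketch under those assumptions (not the project's actual code):

def circle(center, radius, map_size):
    # Hypothetical: flat indices of all tiles within `radius` (Chebyshev
    # distance) of `center` on a map_size x map_size grid, excluding the
    # center tile itself.
    cy, cx = divmod(center, map_size)
    tiles = []
    for y in range(max(0, cy - radius), min(map_size, cy + radius + 1)):
        for x in range(max(0, cx - radius), min(map_size, cx + radius + 1)):
            if (y, x) != (cy, cx):
                tiles.append(y * map_size + x)
    return tiles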
Code Example #11
    world_map[(capital_cells[i] // map_size) * map_size +
              (capital_cells[i] % map_size)]['above'] = 'capital'
    world_map[(capital_cells[i] // map_size) * map_size +
              (capital_cells[i] % map_size)]['tribe'] = tribes[i]

done_tiles = []
active_tiles = []
for i in range(len(capital_cells)):
    done_tiles.append(capital_cells[i])
    active_tiles.append([capital_cells[i]])
while len(done_tiles) != map_size**2:
    for i in range(len(tribes)):
        if len(active_tiles[i]) and tribes[i] != 'Polaris':
            rand_number = random.randrange(0, len(active_tiles[i]))
            rand_cell = active_tiles[i][rand_number]
            neighbours = utils.circle(rand_cell, 1, map_size)
            valid_neighbours = list(
                filter(
                    lambda tile: tile not in done_tiles and world_map[tile][
                        'type'] != 'water', neighbours))
            if not len(valid_neighbours):
                valid_neighbours = list(
                    filter(lambda tile: tile not in done_tiles, neighbours))
            if len(valid_neighbours):
                new_rand_number = random.randrange(0, len(valid_neighbours))
                new_rand_cell = valid_neighbours[new_rand_number]
                world_map[new_rand_cell]['tribe'] = tribes[i]
                active_tiles[i].append(new_rand_cell)
                done_tiles.append(new_rand_cell)
            else:
                active_tiles[i].remove(rand_cell)
Code Example #12
g0.append(Gp(k, tau, theta))
for l in range(n - 2, -1, -1):
    theta = p[l]
    g0.append(Gp(k, tau, theta))
for l in range(n - 2, -1, -1):
    tau = p[l]
    g0.append(Gp(k, tau, theta))

rea = np.real(g0)
img = np.imag(g0)
plt.plot(rea, img, '--')

# M1 Simplified nominal model with no time delay
g1 = Gp(2.5, 2.5, 0)
r1 = np.max(np.abs(g1 - g0))
[c1x, c1y] = circle(np.real(g1), np.imag(g1), r1)
plt.plot(c1x, c1y, 'r')

# M2 Mean parameter values
g2 = Gp(2.5, 2.5, 2.5)
r2 = np.max(np.abs(g2 - g0))
[c2x, c2y] = circle(np.real(g2), np.imag(g2), r2)
plt.plot(c2x, c2y, 'g')

# M3 Nominal model corresponding to the smallest radius


def maxrad(g, g0):
    g = g[0] + 1j * g[1]
    return np.max(np.abs(g - g0))
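The M3 computation is cut off in this snippet; one way to finish it, mirroring the M1/M2 pattern above (a sketch assuming scipy is available, not necessarily the original author's code), is to minimize maxrad over candidate nominal models:

from scipy.optimize import fmin

# Find the nominal model g3 = x[0] + 1j*x[1] whose worst-case distance to
# the model set g0 is smallest, starting the search from g2.
x_opt = fmin(maxrad, [np.real(g2), np.imag(g2)], args=(g0,))
g3 = x_opt[0] + 1j * x_opt[1]
r3 = maxrad(x_opt, g0)
[c3x, c3y] = circle(np.real(g3), np.imag(g3), r3)
plt.plot(c3x, c3y, 'b')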
Code Example #13
def train():
    # Loading the dataset
    dataloader = DataLoader(
        TrainDataset(opt.train_dataset),
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.n_cpu,
    )
    
    # Definition of networks
    generator = Generator()
    discriminator_1 = Discriminator_1()
    discriminator_2 = Discriminator_2()
    feature_extractor = FeatureExtractor()
    
    # Loading the weights
    if opt.train_mode == "GAN":
        generator.load_state_dict(torch.load(opt.load_G))
        if opt.load_D1:
            discriminator_1.load_state_dict(torch.load(opt.load_D1))
        if opt.load_D2:
            discriminator_2.load_state_dict(torch.load(opt.load_D2))
    
    generator.to(device)
    discriminator_1.to(device)
    discriminator_2.to(device)
    feature_extractor.to(device)

    feature_extractor.eval()
    generator.train()
    discriminator_1.train()
    discriminator_2.train()

    # Training options
    criterion_GAN = torch.nn.MSELoss()
    criterion_L1 = torch.nn.L1Loss()
    criterion_content = torch.nn.BCELoss()

    optimizer_G = torch.optim.Adam(generator.parameters(), lr=1e-4, betas=[0.8, 0.999])
    optimizer_D_1 = torch.optim.Adam(discriminator_1.parameters(), lr=1e-4, betas=[0.8, 0.999])
    optimizer_D_2 = torch.optim.Adam(discriminator_2.parameters(), lr=1e-4, betas=[0.8, 0.999])

    valid = Variable(torch.ones((opt.batch_size, 1)), requires_grad=False).to(device)
    fake = Variable(torch.zeros((opt.batch_size, 1)), requires_grad=False).to(device)
    
    for epoch in range(opt.epoch):
        for i, imgs in enumerate(dataloader):
            if i == len(dataloader) - 1:
                continue
            train_data = imgs['Blurred']
            train_label = imgs['Sharp']
            
            train_data = train_data.to(device)
            train_label = train_label.to(device)

            if opt.train_mode == 'pretrain':    
                ### Training the generator
                optimizer_G.zero_grad()

                g_train_output = generator(train_data)

                # Loss calculation
                loss_pre = criterion_GAN(train_label, g_train_output)
                loss_pre.backward()
                optimizer_G.step()

                print("[Epoch %d/%d] [Batch %d/%d] [G loss: %f]"
                % (epoch + 1, opt.epoch, i + 1, len(dataloader), loss_pre.item()))


            elif opt.train_mode == 'GAN':
                g_train_output = generator(train_data)
            
                ###### Training the discriminator
                optimizer_D_1.zero_grad()

                # output of the discriminator
                dis_1_label_out = Variable(discriminator_1(train_label), requires_grad=False) 
                dis_1_fake_out = Variable(discriminator_1(g_train_output.detach()), requires_grad=True) 

                # Loss calculation
                loss_D1_real = criterion_content(dis_1_label_out, valid)
                loss_D1_fake = criterion_content(dis_1_fake_out, fake)

                loss_D1 = torch.log((loss_D1_real + loss_D1_fake) / 2)

                loss_D1.backward()
                optimizer_D_1.step()

                ###### Training the DCT discriminator
                optimizer_D_2.zero_grad()

                # Preparing the mask with radius r
                masking = np.zeros([opt.batch_size, 1, 256, 256])

                masking_0 = circle(opt.param_mask)
                masking_1 = np.expand_dims(np.expand_dims(masking_0, 0), 0)

                # Use a separate index so the dataloader batch counter `i`
                # is not clobbered.
                for b in range(opt.batch_size):
                    masking[b, :, :, :] = masking_1

                masking = torch.Tensor(masking).to(device)

                # Changing RGB to YCbCr
                train_label_Y = rgb_to_ycbcr(train_label)
                g_train_output_Y = rgb_to_ycbcr(g_train_output.detach())
                
                # DCT transformation
                train_label_dct_0 = torch.abs(dct_2d(train_label_Y[:, 0, :, :]).unsqueeze(1))
                g_train_output_dct_0 = torch.abs(dct_2d(g_train_output_Y[:, 0, :, :]).unsqueeze(1))
                
                # Threshold processing
                train_label_dct_1 = train_label_dct_0 > opt.param_dct
                g_train_output_dct_1 = g_train_output_dct_0 > opt.param_dct
                
                train_label_dct_2 = train_label_dct_1.type(torch.cuda.FloatTensor)
                g_train_output_dct_2 = g_train_output_dct_1.type(torch.cuda.FloatTensor)

                # Keep only the high-frequency components
                train_label_dct = train_label_dct_2 * masking
                g_train_output_dct = g_train_output_dct_2 * masking
                
                dis_2_label_out = Variable(discriminator_2(train_label_dct), requires_grad=False)
                dis_2_fake_out = Variable(discriminator_2(g_train_output_dct), requires_grad=True) 
                
                # Loss Calculation
                loss_D2_real = criterion_content(dis_2_label_out, valid)
                loss_D2_fake = criterion_content(dis_2_fake_out, fake)
                
                loss_D2 = torch.log((loss_D2_real + loss_D2_fake) / 2)

                loss_D2.backward()
                optimizer_D_2.step()

                ###### Training the generator
                optimizer_G.zero_grad()

                # Adversarial loss of the generator
                loss_adv = criterion_L1(train_label, g_train_output) * opt.param_alpha

                # Content loss of the generator
                gen_features = feature_extractor(g_train_output)
                real_features = feature_extractor(train_label)
                loss_G = criterion_GAN(gen_features, real_features.detach()) * opt.param_beta

                # Adversarial loss of the discriminator
                dis_1_fake_out_mix = Variable(discriminator_1(g_train_output), requires_grad=True) 
                loss_D1_fake_mix = criterion_content(dis_1_fake_out_mix, valid)

                loss_D1_mix = torch.log(loss_D1_fake_mix) * opt.param_gamma

                # Adversarial loss of the DCT discriminator
                g_train_output_Y_0 = rgb_to_ycbcr(g_train_output)
                g_train_output_dct_00 = torch.abs(dct_2d(g_train_output_Y_0[:, 0, :, :]).unsqueeze(1))
                g_train_output_dct_11 = g_train_output_dct_00 > opt.param_dct
                g_train_output_dct_22 = g_train_output_dct_11.type(torch.cuda.FloatTensor)
                g_train_output_dct_33 = g_train_output_dct_22 * masking
                dis_2_fake_out_mix_0 = Variable(discriminator_2(g_train_output_dct_33), requires_grad=True) 
                loss_D2_fake_mix = criterion_content(dis_2_fake_out_mix_0, valid)
                
                loss_D2_mix = torch.log(loss_D2_fake_mix) * opt.param_delta

                # Total loss
                loss_mix = loss_adv + loss_G - loss_D1_mix - loss_D2_mix

                loss_mix.backward()
                optimizer_G.step()

                print("[Epoch %d/%d] [Batch %d/%d] [Total loss: %f] [G adv loss: %f] [G loss: %f] [D1 loss: %f] [D2 loss: %f]"
                % (epoch + 1, opt.epoch, i + 1, len(dataloader), loss_mix.item(), loss_adv.item(), loss_G.item(), loss_D1_mix.item(), loss_D2_mix.item()))

            else:
                raise NotImplementedError(opt.train_mode)
                

        # Save the models
        if (epoch) % 100 == 0:
            if opt.train_mode == 'pretrain':
                torch.save(generator.cpu().state_dict(), "saved_models/pre/"+opt.network_name+"_G_"+str(epoch)+".pth")
                generator = generator.to(device)
            if opt.train_mode == 'GAN':
                torch.save(generator.cpu().state_dict(), "saved_models/GAN/"+opt.network_name+"/"+opt.network_name+"_G_"+str(epoch) + ".pth")
                torch.save(discriminator_1.cpu().state_dict(), "saved_models/GAN/"+opt.network_name+"/"+opt.network_name+"_D1_"+str(epoch)+".pth")
                torch.save(discriminator_2.cpu().state_dict(), "saved_models/GAN/"+opt.network_name+"/"+opt.network_name+"_D2_"+str(epoch)+".pth")
                generator = generator.to(device)
                discriminator_1 = discriminator_1.to(device)
                discriminator_2 = discriminator_2.to(device)
Code Example #14
def main(_):
	tf.logging.set_verbosity(tf.logging.INFO)
	with tf.Graph().as_default():
		logdir = r'E:\GitHub\MMFI\log\GG12\CNN'
		evaldir = os.path.join(logdir, 'eval')
		if not tf.gfile.Exists(evaldir):
			# tf.gfile.DeleteRecursively(evaldir)
			tf.gfile.MakeDirs(evaldir)

		with tf.name_scope('inputs'):
			fiber_output, fiber_input, encoder, label = data_loader.read_inputs('valid.txt', False)

		with tf.variable_scope('Generator'):
			with tf.variable_scope('G1'):
				generated_input = pix2pix_G(fiber_output, is_training=False) \
				                  * circle(FLAGS.input_size,FLAGS.input_size)
			with tf.variable_scope('G2'):
				generated_data = pix2pix_G(generated_input,is_training=False)\
				                 * circle(FLAGS.input_size,FLAGS.input_size)

		with tf.name_scope('Valid_summary'):
			reshaped_fiber_input = get_summary_image(fiber_input, FLAGS.grid_size)
			reshaped_label = get_summary_image(label, FLAGS.grid_size)
			reshaped_generated_input = get_summary_image(generated_input, FLAGS.grid_size)
			reshaped_generated_data = get_summary_image(generated_data, FLAGS.grid_size)
			tf.summary.image('Input_Fiber', reshaped_fiber_input)
			tf.summary.image('Input_Generator', reshaped_generated_input)
			tf.summary.image('Data_Real', reshaped_label)
			tf.summary.image('Data_Generator', reshaped_generated_data)

		with tf.name_scope('Valid_op'):
			psnr = tf.reduce_mean(tf.image.psnr(generated_data, label, max_val=1.0))
			ssim = tf.reduce_mean(tf.image.ssim(generated_data, label, max_val=1.0))
			corr = correlation(generated_data, label)
			# inception_score = get_inception_score(generated_data)

			tf.summary.scalar('PSNR', psnr)
			tf.summary.scalar('SSIM', ssim)
			tf.summary.scalar('Relation', corr)

			grate = tf.ones([1,FLAGS.grid_size*FLAGS.input_size,10,1],dtype=tf.float32)
			reshaped_images = tf.concat((reshaped_generated_input, grate,
			                             reshaped_fiber_input, grate,
			                             reshaped_label, grate,
			                             reshaped_generated_data, grate), 2)
			uint8_images = tf.cast(reshaped_images*255, tf.uint8)
			image_write_ops = tf.write_file('%s/%s' % (evaldir, 'Generator_is_training_False.png'),
			                                tf.image.encode_png(uint8_images[0]))

			status_message = tf.string_join([' PSNR: ', tf.as_string(psnr), ' ',
			                                 ' SSIM: ', tf.as_string(ssim), ' ',
			                                 ' Correlation: ', tf.as_string(corr)],
			                                name='status_message')


		checkpoint_path = tf.train.latest_checkpoint(logdir)
		tf.logging.info('Evaluating %s' % checkpoint_path)

		tf.contrib.training.evaluate_once(
			checkpoint_path,
			hooks=[tf.contrib.training.SummaryAtEndHook(evaldir),
			       tf.contrib.training.StopAfterNEvalsHook(50),
			       tf.train.LoggingTensorHook([status_message], every_n_iter=5)],
			eval_ops=image_write_ops)
Code Example #15
File: trainer.py Project: progradeep/gan-pytorch
    def train(self):
        criterion = nn.BCELoss()

        input = torch.FloatTensor(self.batch_size, self.nc)
        noise = torch.FloatTensor(self.batch_size, self.nz)
        fixed_noise = torch.FloatTensor(self.batch_size, self.nz).normal_(0, 1)
        label = torch.FloatTensor(self.batch_size)
        real_label = 1
        fake_label = 0

        if self.cuda:
            criterion.cuda()
            input, label = input.cuda(), label.cuda()
            noise, fixed_noise = noise.cuda(), fixed_noise.cuda()

        fixed_noise = Variable(fixed_noise)

        # setup optimizer
        optimizerD = optim.Adam(self.netD.parameters(),
                                lr=self.lrD,
                                betas=(self.beta1, 0.999))
        optimizerG = optim.Adam(self.netG.parameters(),
                                lr=self.lrG,
                                betas=(self.beta1, 0.999))

        for epoch in range(self.niter):

            data, _ = circle(self.batch_size)
            if self.cuda:
                data = data.cuda()

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################

            # train with real
            self.netD.zero_grad()

            label.fill_(real_label)
            inputv = Variable(data)
            labelv = Variable(label)

            output = self.netD(inputv)
            errD_real = criterion(output, labelv)
            D_x = output.data.mean()
            errD_real.backward()

            # train with fake
            noise.normal_(0, 1)
            noisev = Variable(noise)
            fake = self.netG(noisev)

            labelv = Variable(label.fill_(fake_label))
            output = self.netD(fake.detach())
            errD_fake = criterion(output, labelv)
            D_G_z1 = output.data.mean()
            errD_fake.backward()

            errD = errD_real + errD_fake

            optimizerD.step()

            ############################
            # (2) Update G network: minimize log(1 - D(G(z)))
            ###########################
            self.netG.zero_grad()

            labelv = Variable(label.fill_(fake_label))
            output = self.netD(fake)
            errG = -criterion(output, labelv)
            D_G_z2 = output.data.mean()

            errG.backward()

            optimizerG.step()

            print(
                '[%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
                % (epoch, self.niter, errD.data[0], errG.data[0], D_x, D_G_z1,
                   D_G_z2))
            if epoch % 1000 == 0:
                plt.scatter(data[:, 0], data[:, 1], s=10)
                plt.savefig('%s/real_samples.png' % self.outf)
                fake = self.netG(fixed_noise)
                plt.scatter(fake[:, 0], fake[:, 1], s=10)
                plt.savefig('%s/fake_samples_epoch_%03d.png' %
                            (self.outf, epoch))
                plt.close()
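This trainer draws its real samples from circle(self.batch_size), evidently a toy 2D data generator. A plausible stand-in (an assumption, not progradeep's actual helper) samples noisy points on the unit circle:

import math
import torch

def circle(batch_size, radius=1.0, noise=0.02):
    # Hypothetical: return (points, labels) where points are 2D samples on a
    # circle of the given radius with slight Gaussian jitter; the trainer
    # discards the second value when unpacking `data, _`.
    angles = torch.rand(batch_size) * 2 * math.pi
    points = torch.stack((radius * torch.cos(angles),
                          radius * torch.sin(angles)), dim=1)
    points = points + noise * torch.randn_like(points)
    return points, None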
Code Example #16
File: PCloud2.py Project: DavidB-CMU/moped
    def toDepthImage(self, width, height, K, viewpoint = None, zbuffer=True):
        """ Compute Depth Image from point cloud.
        
        Usage: dImg = cloud.toDepthImage(width, height, K, viewpoint)
        
        Input:
            width - Width of output image
            height - Height of output image
            K - numpy array, 3-by-3 intrinsics camera matrix 
            viewpoint - numpy array 4x4 extrinsic camera matrix, image to world
            zbuffer{True} - Fill out missing values with Z-buffering
        Output:
            dImg - Output depth image from given viewpoint
        """
        # Parse input arguments
        if K is None:
            K = self.K
        if K.size == 9:
            KK = np.r_[K[0,0], K[1,1], K[0,2], K[1,2]]
        elif K.size == 4:
            KK = K

        if viewpoint is None:
            viewpoint = np.eye(4,4, dtype=float)

        pts3D = self.copy()

        # Order is important for Z-buffering
        # We want points further away to be processed first
        idx_dist = pts3D[2,:].argsort()
        pts3D = pts3D[:, idx_dist[::-1]]
        pts2D = utils.C_ProjectPts(pts3D, viewpoint, KK)
        
        # Now transfer points back to image
        bounds = np.array([ [0, width-1], [0, height-1] ])
        invalid_pts = utils.out_of_bounds(pts2D, bounds)
        pts2D[:, invalid_pts] = 0

        # Remember: IdxImg is in format [row col] 
        pts2D_int = np.array(np.round(pts2D), dtype=int)

        # Get new depth image
        dImg = DepthImage(np.zeros((height, width)), K = K)

        if not zbuffer:
            dImg[pts2D_int[1,:], pts2D_int[0,:]] = pts3D[2,:]
        else:
            # Z-Buffering --------------------------------------------------- #
            # Create some circle windows according to distance
            # Points that are further away from the camera --> smaller windows
            num_windows = 7 
            winX = list()
            winY = list()
            minX = list(); maxX = list()
            minY = list(); maxY = list()
            for i in range(num_windows):
                x, y = utils.circle(radius=i * 0.75 + 2)
                winX.append(x)
                winY.append(y)
                minX.append(np.zeros(x.shape))
                maxX.append(np.ones(x.shape) * width - 1)
                minY.append(np.zeros(y.shape))
                maxY.append(np.ones(y.shape) * height - 1)
            
            # Paint pixels, first the ones further away
            for pt_idx in idx_dist[::-1]:
                winsize = int(np.fmin(np.ceil(2. / pts3D[2, pt_idx]),
                                      num_windows)) - 2
                winX_idx = np.array(np.fmax(np.fmin(winX[winsize] + \
                                   pts2D_int[0, pt_idx], maxX[winsize]), \
                                   minX[winsize]), dtype=int)
                winY_idx = np.array(np.fmax(np.fmin(winY[winsize] + \
                                   pts2D_int[1, pt_idx], maxY[winsize]), \
                                   minY[winsize]), dtype=int)
                dImg[winY_idx, winX_idx] = pts3D[2, pt_idx]

        return dImg
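Here utils.circle(radius=...) supplies the circular Z-buffer windows. A compatible sketch (assumed for illustration; not the moped project's code) returns the pixel offsets inside a disc:

import numpy as np

def circle(radius):
    # Hypothetical: integer (x, y) offsets of every pixel whose center lies
    # inside a disc of the given radius around the origin.
    r = int(np.ceil(radius))
    yy, xx = np.mgrid[-r:r + 1, -r:r + 1]
    inside = xx ** 2 + yy ** 2 <= radius ** 2
    return xx[inside], yy[inside]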
Code Example #17
for i in range(5):
    file125 = 'gal%s_UV_F125_scale_04_psfmatch.fits'%(i+1)
    hd = fits.open(file125)
    photflam = float(hd[1].header['PHOTFLAM'])
    data_125 = hd[1].data
    section_gal = 'NULIRG%s' % (int(i + 1))
    params, params_gal = basic_params(config_file, 'basic', section_gal)
    cent_x, cent_y = UV_centers_drz(file125)


    print ('=============================================\n')
    print ('<<<<<<<<<<<<<<<<<<<<<ULIRG %s>>>>>>>>>>>>>>>>>>\n'%(i+1))
    print ('=============================================\n')

    print ('Original shape', data_125.shape)
    print ('UV 125 centers --> (%s, %s)'% (cent_x, cent_y))

    

    
    print (' ----> Making annuli masks\n')
    width = 4.0
    aper_lim = 300

    if i ==4:
        aper_lim = 300*0.158/0.131
    nx = data_125.shape[0]
    ny = data_125.shape[1]
    rad1, rad_annulus, masks, masks_annulus = masks_circular(cent_x, cent_y, width, aper_lim, nx, ny)

    ################## computing center boxes ################
    if i ==1:
        x = [150, 150, 150, 350, 350, 350, 550, 550, 550]
        y = [150, 350, 550, 150, 350, 550, 150, 350, 550]
    elif i ==2:
        x = [150, 150, 150, 150, 350, 350, 350, 350, 550, 550, 550, 550, 750, 750, 750, 750]
        y = [150, 350, 550, 750, 150, 350, 550, 750, 150, 350, 550, 750, 150, 350, 550, 750]
    else:
        x = [350, 350, 350, 550, 550, 550, 750, 750, 750]
        y = [350, 550, 750, 350, 550, 750, 350, 550, 750]
      
        
    fig,((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2, figsize = (30,22))
    
    [plot_rectangle(x[j], y[j], szx, szy, color[j], ax1) for j in range(len(x))]
    norm = ImageNormalize(data_125, stretch=AsinhStretch())
    img = ax1.imshow(data_125, origin = 'lower', vmin =0.0005, vmax = 0.001, norm = norm, cmap = plt.cm.Greys_r, zorder =0)

    divider = make_axes_locatable(ax1)
    cax = divider.append_axes("right", size="5%", pad=0.0)
    cbar = plt.colorbar(img,cax=cax, orientation='vertical')
    cbar.minorticks_on()
    

    data_lya = sum_ext(i, 'A_lmfit_x')
    err_lya = sum_ext(i, 'A_lmfit_err')
    ### Positive / negative flux conditions
    # Assumed definitions: data_all, cond_sn_tot, SN_add and SN_diff are
    # used below but were missing or commented out in the original snippet.
    data_all = np.copy(data_lya)
    cond_positive = data_lya < 0.0
    cond_negative = data_lya > 0.0

    #### SN > 1 condition
    SN_all = abs(data_lya / err_lya)
    SN_all[np.isnan(SN_all)] = 0.0
    cond_sn_tot = SN_all < 1.0   # assumed S/N < 1 cut, per the comment above

    SN_add = data_lya + err_lya
    SN_diff = data_lya - err_lya
    b_all = sum_ext(i, 'b_lmfit_x')
    cond_b = abs(abs(b_all)-3) <1e-5

    status_all = sum_ext(i, 'status_lmfit_x')
    cond_status = status_all == 0
    
    data_neg = np.copy(data_all)
    data_test = np.copy(data_all)
    aper_total =  [len(data_neg[masks_annulus[k]]) for k in range(len(rad1)-1) ]  
    
    aper_nonzero_total =  [non_zero(data_test[masks_annulus[k]]) for k in range(len(rad1)-1) ] 
    cond_test = cond_sn_tot + cond_b + cond_status
    data_test[cond_test] = 0.0
    aper_nonzero_total_cutoff = [non_zero(data_test[masks_annulus[k]]) for k in range(len(rad1)-1) ] 

    
    cond_all = cond_sn_tot + cond_b + cond_status + cond_positive   
    data_all[cond_all] = 0.0
    SN_add[cond_all] = 0.0
    SN_diff[cond_all] = 0.0
    
    cond_neg = cond_sn_tot + cond_b + cond_status + cond_negative   

    data_neg[cond_neg] = 0.0

    #data_neg = abs(data_neg)
    #img = ax1.imshow(data_all, origin = 'lower', vmin =-1e-15, vmax = 1e-20, norm = norm, cmap = plt.cm.Greys_r, zorder =0)

    
    fits.writeto('./ULIRG%s_test/data%s.fits'%(i+1, i+1), data = data_test, overwrite = True)
    fits.writeto('./ULIRG%s_test/SN%s.fits'%(i+1, i+1), data = SN_all, overwrite = True)
    fits.writeto('./ULIRG%s_test/b%s.fits'%(i+1, i+1), data = b_all, overwrite = True)
    fits.writeto('./ULIRG%s_test/status%s.fits'%(i+1, i+1), data = status_all, overwrite = True)

    

    aper_SN_low =  [np.mean(SN_add[masks_annulus[k]]) for k in range(len(rad1)-1) ]  
    aper_SN_high =  [np.mean(SN_diff[masks_annulus[k]]) for k in range(len(rad1)-1) ]  
    
    
    sum_SN_low =  [np.sum(SN_add[masks[k]]) for k in range(len(rad1)) ]  
    sum_SN_high =  [np.sum(SN_diff[masks[k]]) for k in range(len(rad1)) ]  

    
    sum_125 =  [np.sum(data_all[masks[k]]) for k in range(len(rad1)) ]  
    aper_125 =  [np.mean(data_all[masks_annulus[k]]) for k in range(len(rad1)-1) ]  
    
    sum_125_neg =  [np.sum(abs(data_neg[masks[k]])) for k in range(len(rad1)) ]  
    aper_125_neg =  [np.mean(abs(data_neg[masks_annulus[k]])) for k in range(len(rad1)-1) ]  
    
    sum_UV =  [np.nansum(data_125[masks[k]]*photflam*150) for k in range(len(rad1)) ]
    aper_UV =  [np.nanmean(data_125[masks_annulus[k]]*photflam*150) for k in range(len(rad1)-1) ]
    
    aper_nonzero =  [non_zero(data_all[masks_annulus[k]]) for k in range(len(rad1)-1) ] 
    aper_nonzero_neg =  [non_zero(data_neg[masks_annulus[k]]) for k in range(len(rad1)-1) ]  

    rad_kpc = [rad*0.04*scale[i] for rad in rad1]
    rad_annulus_kpc = [rad*0.04*scale[i] for rad in rad_annulus]
    diff = [(-aper_125_neg[k] + aper_125[k]) for k in range(len (rad_annulus))]
    
    diff_sum = [(-sum_125_neg[k] + sum_125[k]) for k in range(len (rad1))]

    #norm_sum = [x1/sum_125[-1] for x1 in sum_125]

    #low = [aper_125[i] -aper_125[i] /aper_SN[i] for i in range(len (rad_annulus))]
    #high = [aper_125[i] +aper_125[i]/aper_SN[i] for i in range(len (rad_annulus))]

    ax4.plot(rad_kpc, sum_125, 'r', label = r'$Ly\alpha$ positive')
    ax4.fill_between(rad_kpc, sum_SN_low, sum_SN_high, alpha =0.1, color = 'b', zorder =1)
    
    ax4.plot(rad_kpc, sum_125_neg, 'g', label = r'abs($Ly\alpha$ negative)')
    ax4.plot(rad_kpc, diff_sum, 'b', label = r'($Ly\alpha$ total)')
    ax4.plot(rad_kpc, sum_UV, 'k', label = 'F125 flux')

    
    
    ax3.plot(rad_annulus_kpc, aper_125, 'r', label = r'$Ly\alpha$ positive')

    ax3.fill_between(rad_annulus_kpc, aper_SN_low, aper_SN_high, alpha =0.1, color = 'b', zorder =1)

    ax3.plot(rad_annulus_kpc, aper_125_neg, 'g', label = r'abs($Ly\alpha$ negative)')
    ax3.plot(rad_annulus_kpc, diff, 'b', label = r'($Ly\alpha$ total)')
    ax3.plot(rad_annulus_kpc, aper_UV, 'k', lw = 2.0, label = 'F125 flux')
    
    
    theoretical = [2*np.pi*k*width for k in rad_annulus]
    ax2.plot(rad_annulus_kpc, theoretical, color = 'k', zorder =2,lw = 2.0, label = 'Expected')
    
    ax2.fill_between(rad_annulus_kpc, aper_total, alpha =0.1, color = 'b', zorder =1, label = 'Total')
    ax2.fill_between(rad_annulus_kpc, aper_nonzero_total, alpha =0.2, color = 'c', zorder =1, label = 'Total non-zero')
    ax2.fill_between(rad_annulus_kpc, aper_nonzero_total_cutoff, alpha =0.3, color = 'm', zorder =1, label = 'Quality fit cutoff')

    ax2.fill_between(rad_annulus_kpc, aper_nonzero, alpha =0.3, color = 'r', zorder =2, label ='Positive')
    ax2.fill_between(rad_annulus_kpc, aper_nonzero_neg, alpha =0.6, color = 'g', zorder =3, label = 'Negative')


    ''' #testing the total counts
    test = [aper_nonzero[k] + aper_nonzero_neg[k]- aper_nonzero_total_cutoff[k] for k in range(len (rad_annulus))]
    ax1.fill_between(rad_annulus_kpc, test, alpha =0.5, color = 'g', zorder =2, label = 'negative')
    '''
    #ax2.plot(rad_annulus_kpc, aper_UV, )
    
    
    
    ax3.set_xlim(0, 32)
    ax4.set_xlim(0, 32)
    ax2.set_xlim(0, 32)
    ax2.set_yscale('log')
    ax2.set_ylabel('Number of points')
    ax2.set_xlabel('Distance from UV center [kpc]')
    
    ax3.set_ylabel(r'Aperture mean Flux[in ergs-$s^{-1}$-$cm^{-2}$-arcsec$^{-2}$]')
    ax3.set_xlabel('Distance from UV center [kpc]')
    
    
    ax3.set_yscale('log')
    
    ax3.axvline(x = petro_all[i], color = 'm', linestyle='--', label = r'$2\times R_p$')
    ax4.axvline(x = petro_all[i], color = 'm', linestyle = '--', label = r'$2\times R_p$')
    
    ax2.legend(loc ='lower left')
    ax3.legend(loc ='upper right')
    
    ax1.set_xlim(0,1000)
    ax1.set_ylim(0,1000)
    ax1.plot(cent_x, cent_y, marker ='x', color = 'b', mew=5, ms=20)
    
    circle(cent_x, cent_y, aper_lim, 'c', ax1)
    circle(cent_x, cent_y, petro_all[i]/(0.05*scale[i]), 'm', ax1)
    
    new_label_x = np.linspace((cent_x-sz[i]), (cent_x+sz[i]), 10)#*0.05*phys )
    new_label_y = np.linspace((cent_y-sz[i]), (cent_y+sz[i]), 10)#*0.05*phys )
    ar_x = np.round((new_label_x- cent_x)*0.04*scale[i] )
    ar_y = np.round((new_label_y - cent_y)*0.04*scale[i] )

    ax1.set_yticks(new_label_y)
    ax1.set_yticklabels(int(x) for x in (ar_y))
    ax1.set_xticks(new_label_x)
    ax1.set_xticklabels((int(x) for x in (ar_x)))
    ax1.set_xlabel('kpc')
    ax1.set_ylabel('kpc')
    ax1.set_aspect(1.)
    ax4.legend(loc = 'upper left')
    
    
    ax4.set_ylabel(r'Cumulative Flux[in ergs-$s^{-1}$-$cm^{-2}$-arcsec$^{-2}$]')
    ax4.set_xlabel('Distance from UV center [kpc]')
    
    
    keys = ['radius', 'Lya_pos', 'UV', 'Lya_neg', 'Lya_total', ]
    
    dict_annuli = OrderedDict.fromkeys(keys)
    dict_annuli['radius'] = rad_annulus_kpc
    dict_annuli['Lya_pos'] = aper_125
    dict_annuli['UV'] = aper_UV
    dict_annuli['Lya_neg'] = aper_125_neg
    dict_annuli['Lya_total'] = diff
    
    
    df = pd.DataFrame.from_dict(dict_annuli)
    df.to_csv('ULIRG%s_annuli.csv'%(i+1), columns = keys, index = True)
    
    
    dict_sum = OrderedDict.fromkeys(keys)
    dict_sum['radius'] = rad_kpc
    dict_sum['Lya_pos'] = sum_125
    dict_sum['UV'] = sum_UV
    dict_sum['Lya_neg'] = sum_125_neg
    dict_sum['Lya_total'] = diff_sum
    
    
    df = pd.DataFrame.from_dict(dict_sum)
    df.to_csv('ULIRG%s_sum.csv'%(i+1), columns = keys, index = True)
    
    
    
    ax1.set_title('ULIRG %s F125 image'%(i+1), fontsize = 15)
    fig.savefig('ULIRG%s_radial.png'%(i+1), dpi = 400)
    plt.show()