Code example #1
def plot_batch_reconstruction(layer_dict, X_batch):
    try:
        x_size = y_size = int(np.sqrt(layer_dict['AAE_Input'].shape[1]))
        gridx = gridy = int(np.sqrt(X_batch.shape[0]))
        recon = ll.get_output(layer_dict['AAE_Output'],
                              X_batch).eval().reshape(X_batch.shape[0], x_size,
                                                      y_size)
        utils.plot_grid(recon, gridx, gridy, x_size, y_size)
    except:
        log('Expected square matrices for batch and sample....')
        log('Unable to plot grid.....')
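
The utils.plot_grid helper itself does not appear in any of these excerpts. As a rough sketch, a grid plotter compatible with the call above could look like the following; the tiling logic and the optional out path are assumptions, not the project's actual implementation.

import numpy as np
import matplotlib.pyplot as plt

def plot_grid(images, gridx, gridy, x_size, y_size, out=None):
    # Tile gridx * gridy images of shape (x_size, y_size) into one canvas.
    canvas = np.zeros((gridx * x_size, gridy * y_size))
    for i in range(gridx):
        for j in range(gridy):
            tile = np.asarray(images[i * gridy + j]).reshape(x_size, y_size)
            canvas[i * x_size:(i + 1) * x_size,
                   j * y_size:(j + 1) * y_size] = tile
    plt.figure(figsize=(6, 6))
    plt.imshow(canvas, cmap='gray')
    plt.axis('off')
    if out:
        plt.savefig(out, bbox_inches='tight')
    else:
        plt.show()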
Code example #2
File: test.py  Project: davidath/aae
def plot_generated(layer_dict, random_sample, out=None):
    try:
        x_size = y_size = int(np.sqrt(layer_dict['AAE_Input'].shape[1]))
        gridx = gridy = int(np.sqrt(random_sample.shape[0]))
        gen_out = ll.get_output(layer_dict['AAE_Output'],
                                inputs={
                                    layer_dict['Z']: random_sample
                                }).eval()
        gen_out = gen_out.reshape(random_sample.shape[0], x_size, y_size)
        if out:
            utils.plot_grid(gen_out, gridx, gridy, x_size, y_size, out=out)
        else:
            utils.plot_grid(gen_out, gridx, gridy, x_size, y_size)
    except:
        log('Expected square matrices....')
        log('Unable to plot grid.....')
Code example #3
def trainer(n_train,
            lr,
            m,
            sess,
            saver,
            n_steps=1000,
            beta=1.0,
            do_save_model=False,
            do_save_image=True):

    for i in range(n_train):
        utils.trainer(sess,
                      num_steps=n_steps,
                      train_op=m.train_op,
                      feed_dict_fn=lambda: {
                          m.lr_ph: lr,
                          m.beta_ph: beta
                      },
                      metrics=[m.metrics],
                      hooks=[m.plot_metrics_hook])

        global epoch
        epoch += 1

        if do_save_model:
            print("... saving model: %s" % FLAGS.file_ckpt)
            save_path = saver.save(sess, FLAGS.file_ckpt)

        if do_save_image:
            fig = c.dir_logs + '/' + 'fig_glow__%d' % (epoch)
            print("... saving figure: %s" % fig)

            plt.subplot(121)
            plt.imshow(
                utils.plot_grid(m.x_sampled_train).eval({
                    m.lr_ph: 0.0,
                    m.beta_ph: 0.9
                }))
            plt.subplot(122)
            plt.imshow(
                utils.plot_grid(m.x_sampled_train).eval({
                    m.lr_ph: 0.0,
                    m.beta_ph: 1.0
                }))
            #plt.show()
            plt.savefig(fig)
Code example #4
File: TSDFHandle.py  Project: weders/tsdf
    def plot(self, mode='grid', points=None):

        if mode == 'grid':
            # TODO: make correct for TSDF
            grid = self.get_volume()[:, :, :, 0]
            from utils import plot_grid
            if points is not None:
                offset = np.asarray(
                    [self.bbox[0, 0], self.bbox[1, 0], self.bbox[2, 0]])
                eye = ((points - offset) / self.resolution).astype(int)
                plot_grid(grid, eye=eye)
            else:
                plot_grid(grid)

        if mode == 'mesh':
            from .utils import plot_mesh
            mesh = self.extract_mesh()
            plot_mesh(mesh)
Code example #5
File: shelling.py  Project: HuviX/mai
    def run_simulation(self, fps: int, path: str, name: str) -> None:
        counter = 0
        plot_grid(self.grid, path, counter, self.n)
        non_happy = self.get_nonhappy()
        while len(non_happy) != 0:
            counter += 1
            self.make_change(non_happy)
            non_happy = self.get_nonhappy()
            plot_grid(
                self.grid,
                path,
                counter,
                self.n,
            )

        filenames = [f'{path}/pic{num}.png' for num in range(counter + 1)]
        images = [imageio.imread(filename) for filename in filenames]
        imageio.mimsave(f'{name}.gif', images, loop=1, fps=fps)
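
One possible way to drive this simulation is sketched below; the class name and constructor arguments are hypothetical, since only run_simulation appears in the excerpt.

# Hypothetical usage: the class and its constructor arguments are assumed
# for illustration only.
sim = Shelling(n=50, empty_ratio=0.1, similarity_threshold=0.4)
sim.run_simulation(fps=5, path='pics', name='schelling_run')
# writes pics/pic0.png ... pics/picN.png and then schelling_run.gif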
Code example #6
File: mnist.py  Project: maxencealluin/mnist_DCGAN
def train_GAN(steps=1, training_iter=1000):
    running_count = 0
    epoch = 0

    def generator_loss(output):
        return -torch.sum(torch.log(output)) / output.shape[0]

    x_sub = x_train[:]
    y_sub = y[:]

    opti_netD = torch.optim.Adam(netD.parameters(), lr=lr, betas=(b1, 0.999))
    opti_netG = torch.optim.Adam(netG.parameters(), lr=lr, betas=(b1, 0.999))
    criterion = nn.BCELoss()

    acc_loss_d = 0
    acc_loss_g = 0
    sample = sample_noise(16)

    for i in range(training_iter):
        running_count += bs
        if running_count > len_data:
            epoch += 1
            running_count = 0
        if i % 200 == 0:
            utils.plot_grid(netG, sample, RGB=RGB)
            plt.pause(0.00005)
        for s in range(steps):
            opti_netD.zero_grad()
            x_true = torch.Tensor(
                x_sub[np.random.randint(0, len(x_sub), size=bs), :, :]).view(
                    -1, 1 if not RGB else 3, 28, 28).cuda()
            y_true = torch.ones(x_true.shape[0]).cuda()
            x_false = sample_noise(bs)
            y_false = torch.zeros(x_false.shape[0]).cuda()

            out_true = x_true
            fake = netG(x_false)
            res_true = netD(out_true).squeeze(1)
            res_false = netD(fake.detach()).squeeze(1)

            loss_D = criterion(res_true, y_true) + criterion(
                res_false, y_false)
            loss_D.backward()
            acc_loss_d += loss_D
            opti_netD.step()

        opti_netG.zero_grad()
        x_false = sample_noise(bs)
        # out_false = netG(x_false)
        res_false = netD(fake).squeeze(1)
        # print(res_false)
        loss_G = criterion(res_false, y_true)
        loss_G.backward()
        acc_loss_g += loss_G
        opti_netG.step()

        if i > 0 and i % 20 == 0:  # report the 20-iteration average losses
            print(
                f"Epoch {epoch} Loss D {acc_loss_d / 20:8.3}    Loss G {acc_loss_g / 20:7.3}"
            )
            acc_loss_d = 0
            acc_loss_g = 0
        if epoch > 5000:
            break
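
The excerpt above relies on module-level objects (netG, netD, sample_noise, bs, lr, b1, RGB, len_data, x_train, y) defined elsewhere in mnist.py. A minimal sample_noise consistent with how it is called could look like this; the latent dimensionality and the (n, latent_dim, 1, 1) shape are assumptions.

import torch

def sample_noise(n, latent_dim=100):
    # Draw n latent vectors for the generator; the (n, latent_dim, 1, 1) shape
    # suits DCGAN-style generators built from ConvTranspose2d layers.
    return torch.randn(n, latent_dim, 1, 1).cuda()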
Code example #7
    def train(self,
              it_train,
              it_val,
              batch_size,
              num_epochs,
              out_dir,
              model_dir=None,
              save_every=10,
              resume=False,
              quick_run=False):
        """
        Training loop.
        it_train: training set iterator
        it_val: validation set iterator
        batch_size: batch size
        num_epochs: number of epochs to train
        out_dir: output directory to log results
        model_dir: output directory to log saved models
        save_every: how many epochs should we save the model?
        resume: if `True`, append to the results file, not overwrite it
        quick_run: only perform one minibatch per train/valid loop. This is
          good for fast debugging.
        """
        def _loop(fn, itr):
            rec = [[] for i in range(len(self.train_keys))]
            for b in range(itr.N // batch_size):
                X_batch, Y_batch = it_train.next()
                # print X_batch.shape, Y_batch.shape
                Z_batch = floatX(
                    self.sampler(X_batch.shape[0], self.latent_dim))
                results = fn(Z_batch, X_batch, Y_batch)
                for i in range(len(results)):
                    rec[i].append(results[i])
                if quick_run:
                    break
            return tuple([np.mean(elem) for elem in rec])

        header = ["epoch"]
        for key in self.train_keys:
            header.append("train_%s" % key)
        for key in self.train_keys:
            header.append("valid_%s" % key)
        header.append("lr")
        header.append("time")
        header.append("mode")
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        if model_dir != None and not os.path.exists(model_dir):
            os.makedirs(model_dir)
        if self.verbose:
            try:
                from nolearn.lasagne.visualize import draw_to_file
                draw_to_file(get_all_layers(self.dcgan['gen']),
                             "%s/gen_dcgan.png" % out_dir,
                             verbose=True)
                draw_to_file(get_all_layers(self.dcgan['disc']),
                             "%s/disc_dcgan.png" % out_dir,
                             verbose=True)
                draw_to_file(get_all_layers(self.p2p['gen']),
                             "%s/gen_p2p.png" % out_dir,
                             verbose=True)
                draw_to_file(get_all_layers(self.p2p['disc']),
                             "%s/disc_p2p.png" % out_dir,
                             verbose=True)
            except:
                pass
        f = open("%s/results.txt" % out_dir, "w" if not resume else "a")
        if not resume:
            f.write(",".join(header) + "\n")
            f.flush()
            print ",".join(header)
        else:
            if self.verbose:
                print "loading weights from: %s" % resume
            self.load_model(resume)
        #cb = ReduceLROnPlateau(self.lr,verbose=self.verbose)
        for e in range(num_epochs):
            out_str = []
            out_str.append(str(e + 1))
            t0 = time()
            # training
            results = _loop(self.train_fn, it_train)
            for i in range(len(results)):
                # train_losses[i].append(results[i])
                out_str.append(str(results[i]))
            # if reduce_on_plateau:
            #    cb.on_epoch_end(np.mean(recon_losses), e+1)
            # validation
            results = _loop(self.loss_fn, it_val)
            for i in range(len(results)):
                # valid_losses[i].append(results[i])
                out_str.append(str(results[i]))
            out_str.append(str(self.lr.get_value()))
            out_str.append(str(time() - t0))
            out_str.append(self.train_mode)
            out_str = ",".join(out_str)
            print(out_str)
            f.write("%s\n" % out_str)
            f.flush()
            if self.train_mode in ['both', 'p2p']:
                # plot an NxN grid of [A, predict(A)]
                plot_grid("%s/out_%i.png" % (out_dir, e + 1),
                          it_val,
                          self.gen_fn,
                          is_a_grayscale=self.is_a_grayscale,
                          is_b_grayscale=self.is_b_grayscale)
                # plot big pictures of predict(A) in the valid set
                self.generate_atob(it_train,
                                   1,
                                   "%s/dump_train" % out_dir,
                                   deterministic=False)
                self.generate_atob(it_val,
                                   1,
                                   "%s/dump_valid" % out_dir,
                                   deterministic=False)
            if self.train_mode in ['both', 'dcgan']:
                # plot A generated from G(z)
                self.generate_gz(num_examples=20,
                                 batch_size=batch_size,
                                 out_dir="%s/dump_a" % out_dir,
                                 deterministic=False)
            if model_dir != None and (e + 1) % save_every == 0:
                self.save_model("%s/%i.model" % (model_dir, e + 1))
Code example #8
File: reject.py  Project: mccbc/nrao
def reject(imfile, catfile, threshold):
    """Reject noisy detections.
    
    Parameters
    ----------
    imfile : str
        The path to the radio image file
    catfile : str
        The path to the source catalog, as obtained from detect.py
    threshold : float
        The signal-to-noise threshold below which sources are rejected
    """
    # Extract information from filename
    outfile = os.path.basename(catfile).split('cat_')[1].split('.dat')[0]
    region = outfile.split('region')[1].split('_band')[0]
    band = outfile.split('band')[1].split('_val')[0]
    min_value = outfile.split('val')[1].split('_delt')[0]
    min_delta = outfile.split('delt')[1].split('_pix')[0]
    min_npix = outfile.split('pix')[1]
    print("\nSource rejection for region {} in band {}".format(region, band))

    print("Loading image file")
    contfile = fits.open(imfile)
    data = contfile[0].data.squeeze()
    mywcs = wcs.WCS(contfile[0].header).celestial

    catalog = Table(Table.read(catfile, format='ascii'), masked=True)

    beam = radio_beam.Beam.from_fits_header(contfile[0].header)
    pixel_scale = np.abs(
        mywcs.pixel_scale_matrix.diagonal().prod())**0.5 * u.deg
    ppbeam = (beam.sr / (pixel_scale**2)).decompose().value

    data = data / ppbeam

    # Remove existing region files
    if os.path.isfile('./reg/reg_' + outfile + '_annulus.reg'):
        os.remove('./reg/reg_' + outfile + '_annulus.reg')
    if os.path.isfile('./reg/reg_' + outfile + '_filtered.reg'):
        os.remove('./reg/reg_' + outfile + '_filtered.reg')

    # Load in manually accepted and rejected sources
    override_accepted = []
    override_rejected = []
    if os.path.isfile('./.override/accept_' + outfile + '.txt'):
        override_accepted = np.loadtxt('./.override/accept_' + outfile +
                                       '.txt').astype('int')
    if os.path.isfile('./.override/reject_' + outfile + '.txt'):
        override_rejected = np.loadtxt('./.override/reject_' + outfile +
                                       '.txt').astype('int')
    print("\nManually accepted sources: ", set(override_accepted))
    print("Manually rejected sources: ", set(override_rejected))

    print('\nCalculating RMS values within aperture annuli')
    pb = ProgressBar(len(catalog))

    data_cube = []
    masks = []
    rejects = []
    snr_vals = []
    mean_backgrounds = []

    for i in range(len(catalog)):
        x_cen = catalog['x_cen'][i] * u.deg
        y_cen = catalog['y_cen'][i] * u.deg
        major_fwhm = catalog['major_fwhm'][i] * u.deg
        minor_fwhm = catalog['minor_fwhm'][i] * u.deg
        position_angle = catalog['position_angle'][i] * u.deg
        dend_flux = catalog['dend_flux_band{}'.format(band)][i]

        annulus_width = 1e-5 * u.deg
        center_distance = 1e-5 * u.deg

        # Define some ellipse properties in pixel coordinates
        position = coordinates.SkyCoord(x_cen,
                                        y_cen,
                                        frame='icrs',
                                        unit=(u.deg, u.deg))
        pix_position = np.array(position.to_pixel(mywcs))
        pix_major_fwhm = major_fwhm / pixel_scale
        pix_minor_fwhm = minor_fwhm / pixel_scale

        # Cutout section of the image we care about, to speed up computation time
        size = (center_distance + annulus_width + major_fwhm) * 2.2
        cutout = Cutout2D(data, position, size, mywcs, mode='partial')
        cutout_center = regions.PixCoord(cutout.center_cutout[0],
                                         cutout.center_cutout[1])

        # Define the aperture regions needed for SNR
        ellipse_reg = regions.EllipsePixelRegion(
            cutout_center,
            pix_major_fwhm * 2.,
            pix_minor_fwhm * 2.,
            angle=position_angle
        )  # Make sure you're running the dev version of regions, otherwise the position angles will be in radians!

        innerann_reg = regions.CirclePixelRegion(
            cutout_center, center_distance / pixel_scale + pix_major_fwhm)
        outerann_reg = regions.CirclePixelRegion(
            cutout_center, center_distance / pixel_scale + pix_major_fwhm +
            annulus_width / pixel_scale)

        # Make masks from aperture regions
        ellipse_mask = mask(ellipse_reg, cutout)
        annulus_mask = mask(outerann_reg, cutout) - mask(innerann_reg, cutout)

        # Plot annulus and ellipse regions
        data_cube.append(cutout.data)
        masks.append([annulus_mask, ellipse_mask])

        # Calculate the SNR and aperture flux sums
        bg_rms = rms(cutout.data[annulus_mask.astype('bool')])
        peak_flux = np.max(cutout.data[ellipse_mask.astype('bool')])
        flux_rms_ratio = peak_flux / bg_rms
        snr_vals.append(flux_rms_ratio)

        # Reject bad sources below some SNR threshold
        rejected = False
        if flux_rms_ratio <= threshold:
            rejected = True

        # Process manual overrides
        if catalog['_idx'][i] in override_accepted:
            rejected = False
        if catalog['_idx'][i] in override_rejected:
            rejected = True
        rejects.append(int(rejected))

        # Add non-rejected source ellipses to a new region file
        fname = './reg/reg_' + outfile + '_filtered.reg'
        with open(fname, 'a') as fh:
            if os.stat(fname).st_size == 0:
                fh.write("icrs\n")
            if not rejected:
                fh.write("ellipse({}, {}, {}, {}, {}) # text={{{}}}\n".format(
                    x_cen.value, y_cen.value, major_fwhm.value,
                    minor_fwhm.value, position_angle.value, i))
        pb.update()

    # Plot the grid of sources
    plot_grid(data_cube, masks, rejects, snr_vals, catalog['_idx'])
    plt.suptitle(
        'region={}, band={}, min_value={}, min_delta={}, min_npix={}, threshold={:.4f}'
        .format(region, band, min_value, min_delta, min_npix, threshold))
    plt.show(block=False)

    # Get overrides from user
    print(
        'Manual overrides example: type "r319, a605" to manually reject source #319 and accept source #605.'
    )
    overrides = input(
        "\nType manual override list, or press enter to continue:\n").split(
            ', ')
    accepted_list = [
        s[1:] for s in list(filter(lambda x: x.startswith('a'), overrides))
    ]
    rejected_list = [
        s[1:] for s in list(filter(lambda x: x.startswith('r'), overrides))
    ]

    # Save the manually accepted and rejected sources
    fname = './.override/accept_' + outfile + '.txt'
    with open(fname, 'a') as fh:
        for num in accepted_list:
            fh.write('\n' + str(num))
    fname = './.override/reject_' + outfile + '.txt'
    with open(fname, 'a') as fh:
        for num in rejected_list:
            fh.write('\n' + str(num))
    print(
        "Manual overrides written to './.override/' and saved to source catalog. New overrides will be displayed the next time the rejection script is run."
    )

    # Process the new overrides, to be saved into the catalog
    rejects = np.array(rejects)
    # accepted_list/rejected_list already hold the numeric IDs with the leading
    # 'a'/'r' stripped, so convert them directly to integer indices
    acc = np.array(accepted_list, dtype=int)
    rej = np.array(rejected_list, dtype=int)
    rejects[acc] = 0
    rejects[rej] = 1

    # Save the catalog with new columns for SNR
    catalog.add_column(Column(snr_vals), name='snr_band' + band)
    catalog.add_column(np.invert(catalog.mask['snr_band' + band]).astype(int),
                       name='detected_band' + band)
    catalog.add_column(Column(rejects), name='rejected')
    catalog.write('./cat/cat_' + outfile + '_filtered.dat', format='ascii')
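
mask() and rms() are project helpers that are not included in the excerpt. Plausible minimal versions, inferred only from how they are called above (the regions-based rasterization is an assumption), might be:

import numpy as np

def mask(reg, cutout):
    # Rasterize a regions PixelRegion onto the cutout's pixel grid as a 0/1 array.
    return reg.to_mask(mode='center').to_image(cutout.data.shape)

def rms(values):
    # Root-mean-square of the selected pixel values.
    return np.sqrt(np.mean(np.asarray(values) ** 2))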
Code example #9
def save_graph_and_k(inp_string, save_fig=False):
    grid = utils.char_to_bitmap(inp_string)
    k = utils.find_k(grid)
    utils.plot_grid(grid, k, save_fig)
Code example #10
                                          weights,
                                          cols=par_names,
                                          N=10000)

    # ==========================
    # PLOT SAMPLES:
    # ==========================
    output_folder = 'results/' + sim + '/' + meth + '/'
    Path(output_folder).mkdir(parents=True, exist_ok=True)
    if seed >= 0 and seed < 10:
        if meth == 'True':
            seed = 'true_samples'

        samples_df = samples_df[~samples_df.isin([np.nan, np.inf, -np.inf]).
                                any(1)]
        plot_grid(samples_df, lims=[list(bounds[key]) for key in par_names])
        plt.savefig(output_folder + str(seed) + '-plot.png', dpi=300)
        plt.close()

        if 'TE' in sim and str(args.meth) == 'BO':
            dgp_funcs.plot_posterior_samples(target_model,
                                             x_counts=1000,
                                             samples=100,
                                             points=True)
            plt.savefig(output_folder + str(seed) + '-state.png', dpi=300)
            plt.close()

    # ==========================
    # STORE RESULTS:
    # ==========================
    # save Wasserstein distance and empirical time as results
Code example #11
    def run_test(self):
        SAMPLES_PER_CLASS = 50
        N_CLASSES = 10
        TIME = 150
        BIN_SIZE = 10
        DELAY = 50
        DURATION = 10
        SPARSITY = 0.05
        CI_LVL = 0.95

        # Determine the output and spatio-temporal response to various patterns, including unknown classes
        for model in ["scratch", "trained"]:
            if model == "trained":  # Initially compute test statistics with model initialized from scratch, then do the same with trained model
                try:
                    self.network: Net = load(self.config.RESULT_FOLDER +
                                             "/model.pt")
                except FileNotFoundError as e:
                    print("No saved network model found.")
                    raise e
                # Direct network to GPU
                if P.GPU: self.network.to_gpu()
                self.stats_manager = utils.StatsManager(
                    self.network, self.config.CLASSES, self.config.ASSIGNMENTS)
            self.network.train(False)
            print("Testing " + model + " model...")

            for type in ["out", "st"]:
                if type == "out":
                    print("Computing output responses for various patterns")
                else:
                    print(
                        "Computing spatio-temporal responses for various patterns"
                    )
                unk = None
                for k in range(N_CLASSES + 1):
                    pattern_name = str(k) if k < N_CLASSES else "rnd"
                    print("Pattern: " + pattern_name)
                    encoder = PoissonEncoder(
                        time=self.config.TIME, dt=self.config.DT
                    ) if type == "out" else utils.CustomEncoder(
                        TIME, DELAY, DURATION, self.config.DT, SPARSITY)
                    dataset = self.data_manager.get_test(
                        [k], encoder,
                        SAMPLES_PER_CLASS) if k < N_CLASSES else None
                    # Get next input sample.
                    input_enc = next(
                        iter(dataset)
                    )["encoded_image"] if k < N_CLASSES else encoder(
                        torch.cat(
                            (torch.rand(SAMPLES_PER_CLASS, *
                                        self.config.INPT_SHAPE) *
                             (self.config.INPT_NORM /
                              (.25 * self.config.INPT_SHAPE[1] *
                               self.config.INPT_SHAPE[2])
                              if self.config.INPT_NORM is not None else 1.),
                             torch.zeros(SAMPLES_PER_CLASS, *
                                         self.config.LABEL_SHAPE)),
                            dim=3) * self.config.INTENSITY)
                    if P.GPU: input_enc = input_enc.cuda()
                    # Run the network on the input without labels
                    self.network.run(
                        inputs={"X": input_enc},
                        time=self.config.TIME if type == "out" else TIME)
                    # Update network activity monitoring
                    res = self.stats_manager.get_class_scores(
                    ) if type == "out" else self.stats_manager.get_st_resp(
                        bin_size=BIN_SIZE)
                    if k not in self.config.CLASSES and k < N_CLASSES:
                        unk = res if unk is None else torch.cat(
                            (unk, res), dim=0)
                    # Reset network state
                    self.network.reset_state_variables()
                    # Save results
                    if type == "out":
                        mean = res.mean(dim=0)
                        std = res.std(dim=0)
                        count = res.size(0)
                        utils.plot_out_resp(
                            [mean], [std], [count], [pattern_name + " out"],
                            self.config.CLASSES, self.config.RESULT_FOLDER +
                            "/" + model + "/out_mean_" + pattern_name + ".png",
                            CI_LVL)
                        utils.plot_out_dist(
                            mean, std, self.config.CLASSES,
                            self.config.RESULT_FOLDER + "/" + model +
                            "/out_dist_" + pattern_name + ".png")
                    else:
                        utils.plot_st_resp(
                            [res.mean(dim=0)[:, :, [0, 3, 6, 9]]],
                            [pattern_name + " resp."], BIN_SIZE,
                            self.config.RESULT_FOLDER + "/" + model +
                            "/st_resp_" + pattern_name + ".png")
                        res = res.mean(dim=3).mean(dim=2)
                        utils.plot_series([res.mean(dim=0)], [res.std(dim=0)],
                                          [pattern_name + " resp."], BIN_SIZE,
                                          self.config.RESULT_FOLDER + "/" +
                                          model + "/time_resp_" +
                                          pattern_name + ".png", CI_LVL)
                print("Pattern: unk")
                if type == "out":
                    mean = unk.mean(dim=0)
                    std = unk.std(dim=0)
                    count = unk.size(0)
                    utils.plot_out_resp([mean], [std], [count], ["unk out"],
                                        self.config.CLASSES,
                                        self.config.RESULT_FOLDER + "/" +
                                        model + "/out_mean_unk.png", CI_LVL)
                    utils.plot_out_dist(
                        mean, std, self.config.CLASSES,
                        self.config.RESULT_FOLDER + "/" + model +
                        "/out_dist_unk.png")

                else:
                    utils.plot_st_resp([unk.mean(dim=0)[:, :, [0, 3, 6, 9]]],
                                       ["unk resp."], BIN_SIZE,
                                       self.config.RESULT_FOLDER + "/" +
                                       model + "/st_resp_unk.png")
                    unk = unk.mean(dim=3).mean(dim=2)
                    utils.plot_series([unk.mean(dim=0)], [unk.std(dim=0)],
                                      ["unk resp."], BIN_SIZE,
                                      self.config.RESULT_FOLDER + "/" + model +
                                      "/time_resp_unk.png", CI_LVL)

        # Plot kernels
        print("Plotting network kernels")
        connections = {
            "inpt": ("X", "Y"),
            "exc": ("Y", "Y"),
            "inh": ("Z", "Y")
        }
        lin_coord = self.network.coord_y_disc.view(
            -1) * self.config.GRID_SHAPE[2] + self.network.coord_x_disc.view(
                -1)
        knl_idx = [
            torch.nonzero(lin_coord == i)
            for i in range(self.config.GRID_SHAPE[1] *
                           self.config.GRID_SHAPE[2])
        ]
        knl_idx = [
            knl_idx[i][0] if len(knl_idx[i]) > 0 else None
            for i in range(len(knl_idx))
        ]
        for name, conn in connections.items():
            w = self.network.connections[conn].w.t()
            lin_coord = lin_coord.to(w.device)
            kernels = torch.zeros(self.config.GRID_SHAPE[1] *
                                  self.config.GRID_SHAPE[2],
                                  self.config.GRID_SHAPE[1],
                                  self.config.GRID_SHAPE[2],
                                  device=w.device)
            if name != "inpt":
                w = w.view(
                    self.config.NEURON_SHAPE[0] * self.config.NEURON_SHAPE[1],
                    self.config.NEURON_SHAPE[0] * self.config.NEURON_SHAPE[1])
                w_red = torch.zeros(
                    self.config.NEURON_SHAPE[0] * self.config.NEURON_SHAPE[1],
                    self.config.GRID_SHAPE[1] * self.config.GRID_SHAPE[2],
                    device=w.device)
                for i in range(w.size(1)):
                    w_red[:, lin_coord[i]] += w[:, i]
                w = w_red
            w = w.view(
                self.config.NEURON_SHAPE[0] * self.config.NEURON_SHAPE[1],
                self.config.GRID_SHAPE[1], self.config.GRID_SHAPE[2])
            for i in range(kernels.size(0)):
                if knl_idx[i] is not None:
                    kernels[i, :, :] = w[knl_idx[i], :, :]
            utils.plot_grid(kernels,
                            path=self.config.RESULT_FOLDER + "/weights_" +
                            name + ".png",
                            num_rows=self.config.GRID_SHAPE[1],
                            num_cols=self.config.GRID_SHAPE[2])

        # Calculate accuracy on test set
        print("Evaluating test accuracy...")
        self.eval_pass(self.tst_set, train=False)
        print("Test accuracy: " +
              str(100 * self.stats_manager.eval_accuracy[-1]) + "%")

        print("Finished!")
Code example #12
N_BOOST = 500  # number of data/label pairs used for boosting
LABEL_TRESHOLD = 0.98  # label threshold
LR_SOURCE = 0.0001  # source learning rate
LR_TARGET = 0.0001  # target learning rate
EPOCH_SOURCE = 15  # source training epochs
DOMAIN_ADAPTATION_STEPS = 30  # domain adaptation steps
OUTPUT_FOLDER = 'output/'  # location of the output folder

# --------------------------------------------------------------------------------------------------
# Load the MNIST data (X_S) and MNIST-M data (X_T)
# --------------------------------------------------------------------------------------------------
X_S, _, Y_S, _ = return_mnist()
X_T, X_T_VAL, Y_T, Y_T_VAL = return_mnistm()

if ENABLE_PLOT == 'Plot':
    plot_grid(X_S, [np.argmax(i) for i in Y_S], False, '')
    plot_grid(X_T, [np.argmax(i) for i in Y_T], False, '')

# --------------------------------------------------------------------------------------------------
# Train on the source domain data (X_S) and store the network, or load an already trained one
# --------------------------------------------------------------------------------------------------
if TRAIN_FRESH == 'Train':
    model = get_model(input_shape=(X_S.shape[1], X_S.shape[1], 3),
                      low_do=False)
    train_save_model(model,
                     X_S,
                     Y_S,
                     OUTPUT_FOLDER,
                     batch_size=124,
                     lr=LR_SOURCE,
                     epc=EPOCH_SOURCE)
Code example #13
        u = y_gt.cpu().detach().numpy()
        u_pd = y_pd.cpu().detach().numpy()
        u_mean = u.mean(axis=1).reshape(-1)

        eps = 1.0e-6
        diffs = [
            np.linalg.norm(u[i].reshape(-1) - u_pd[i].reshape(-1)) /
            (np.linalg.norm(u[i].reshape(-1)) + eps) for i in range(len(u))
        ]
        diffs_over_time.append(diffs)

        print("test case {:>5d} | test loss: {:>7.12f}".format(i, losses[i]))

        if i in inds_of_sims_to_show:
            print("Plotting...")
            utils.plot_grid(dataset[i].pos.cpu().detach().numpy())
            plt.figure(0)
            utils.plot_fields(
                t=dataset[i].t,
                coords=dataset[i].pos,
                fields={
                    "y_pd": u_pd,
                    "y_gt": u,
                },
                save_path="./tmp_figs/",
                delay=0.0001,
            )
            plt.show()

        # if i == 2:  # 3 for grids, 2 for time points
        #     break
Code example #14
def flux(region):

    # import the catalog file, get names of bands
    filename = glob('./cat/mastercat_region{}*'.format(region))[0]
    catalog = Table(Table.read(filename, format='ascii'), masked=True)
    catalog.sort('_idx')

    bands = np.array(filename.split('bands_')[1].split('.dat')[0].split('_'),
                     dtype=int)
    n_bands = len(bands)
    n_rows = len(catalog)

    ellipse_npix_col = MaskedColumn(length=len(catalog),
                                    name='ellipse_npix',
                                    mask=True)
    circ1_npix_col = MaskedColumn(length=len(catalog),
                                  name='circ1_npix',
                                  mask=True)
    circ2_npix_col = MaskedColumn(length=len(catalog),
                                  name='circ2_npix',
                                  mask=True)
    circ3_npix_col = MaskedColumn(length=len(catalog),
                                  name='circ3_npix',
                                  mask=True)

    n_rejected = 0

    for i in range(n_bands):

        band = bands[i]

        # Load image file for this band
        print("\nLoading image file for region {} in band {} (Image {}/{})".
              format(region, band, i + 1, n_bands))
        imfile = grabfileinfo(region, band)[0]
        contfile = fits.open(imfile)
        data = contfile[0].data.squeeze()

        # Set up wcs, beam, and pixel scale for this image
        mywcs = wcs.WCS(contfile[0].header).celestial
        beam = radio_beam.Beam.from_fits_header(contfile[0].header)
        pixel_scale = np.abs(
            mywcs.pixel_scale_matrix.diagonal().prod())**0.5 * u.deg
        ppbeam = (beam.sr / (pixel_scale**2)).decompose().value
        print('ppbeam: ', ppbeam)
        data = data / ppbeam

        # Set up columns for each aperture
        peak_flux_col = MaskedColumn(length=len(catalog),
                                     name='peak_flux_band{}'.format(band),
                                     mask=True)
        annulus_median_col = MaskedColumn(
            length=len(catalog),
            name='annulus_median_band{}'.format(band),
            mask=True)
        annulus_rms_col = MaskedColumn(length=len(catalog),
                                       name='annulus_rms_band{}'.format(band),
                                       mask=True)
        ellipse_flux_col = MaskedColumn(
            length=len(catalog),
            name='ellipse_flux_band{}'.format(band),
            mask=True)
        circ1_flux_col = MaskedColumn(length=len(catalog),
                                      name='circ1_flux_band{}'.format(band),
                                      mask=True)
        circ2_flux_col = MaskedColumn(length=len(catalog),
                                      name='circ2_flux_band{}'.format(band),
                                      mask=True)
        circ3_flux_col = MaskedColumn(length=len(catalog),
                                      name='circ3_flux_band{}'.format(band),
                                      mask=True)

        ellipse_rms_col = MaskedColumn(length=len(catalog),
                                       name='ellipse_rms_band{}'.format(band),
                                       mask=True)
        circ1_rms_col = MaskedColumn(length=len(catalog),
                                     name='circ1_rms_band{}'.format(band),
                                     mask=True)
        circ2_rms_col = MaskedColumn(length=len(catalog),
                                     name='circ2_rms_band{}'.format(band),
                                     mask=True)
        circ3_rms_col = MaskedColumn(length=len(catalog),
                                     name='circ3_rms_band{}'.format(band),
                                     mask=True)

        circ1_r, circ2_r, circ3_r = 5e-6 * u.deg, 1e-5 * u.deg, 1.5e-5 * u.deg

        print('Photometering sources')
        pb = ProgressBar(len(catalog[np.where(catalog['rejected'] == 0)]))

        masks = []
        datacube = []
        rejects = []
        snr_vals = []
        names = []

        # Iterate over sources, extracting ellipse parameters
        for j in range(n_rows):

            if catalog['rejected'][j] == 1:
                continue

            source = catalog[j]
            x_cen = source['x_cen'] * u.deg
            y_cen = source['y_cen'] * u.deg
            major = source['major_fwhm'] * u.deg
            minor = source['minor_fwhm'] * u.deg
            pa = source['position_angle'] * u.deg

            annulus_width = 1e-5 * u.deg
            center_distance = 1e-5 * u.deg

            # Convert to pixel coordinates
            position = coordinates.SkyCoord(x_cen,
                                            y_cen,
                                            frame='icrs',
                                            unit=(u.deg, u.deg))
            pix_position = np.array(position.to_pixel(mywcs))
            pix_major = major / pixel_scale
            pix_minor = minor / pixel_scale

            # Create cutout
            size = np.max([
                circ3_r.value,
                major.value + center_distance.value + annulus_width.value
            ]) * 2.2 * u.deg
            try:
                cutout = Cutout2D(data, position, size, mywcs, mode='partial')
            except NoOverlapError:
                catalog['rejected'][j] = 1
                pb.update()
                continue
            cutout_center = regions.PixCoord(cutout.center_cutout[0],
                                             cutout.center_cutout[1])
            datacube.append(cutout.data)

            # create all aperture shapes
            ellipse_reg = regions.EllipsePixelRegion(cutout_center,
                                                     pix_major * 2.,
                                                     pix_minor * 2.,
                                                     angle=pa)
            circ1_reg = regions.CirclePixelRegion(cutout_center,
                                                  circ1_r / pixel_scale)
            circ2_reg = regions.CirclePixelRegion(cutout_center,
                                                  circ2_r / pixel_scale)
            circ3_reg = regions.CirclePixelRegion(cutout_center,
                                                  circ3_r / pixel_scale)

            innerann_reg = regions.CirclePixelRegion(
                cutout_center, center_distance / pixel_scale + pix_major)
            outerann_reg = regions.CirclePixelRegion(
                cutout_center, center_distance / pixel_scale + pix_major +
                annulus_width / pixel_scale)

            annulus_mask = mask(outerann_reg, cutout) - mask(
                innerann_reg, cutout)

            # get flux information from regions
            ellipse_flux, ellipse_rms, peak_flux, ellipse_mask, ellipse_npix = apsum(
                ellipse_reg, cutout)
            circ1_flux, circ1_rms, _, circ1_mask, circ1_npix = apsum(
                circ1_reg, cutout)
            circ2_flux, circ2_rms, _, circ2_mask, circ2_npix = apsum(
                circ2_reg, cutout)
            circ3_flux, circ3_rms, _, circ3_mask, circ3_npix = apsum(
                circ3_reg, cutout)

            annulus_rms = rms(cutout.data[annulus_mask.astype('bool')])
            annulus_median = np.median(
                cutout.data[annulus_mask.astype('bool')])

            # Add grid plot mask to list
            masklist = [
                ellipse_mask, annulus_mask, circ1_mask, circ2_mask, circ3_mask
            ]
            masks.append(masklist)

            # add fluxes to appropriate columns
            peak_flux_col[j] = peak_flux
            ellipse_flux_col[j], ellipse_rms_col[j] = ellipse_flux, ellipse_rms
            circ1_flux_col[j], circ1_rms_col[j] = circ1_flux, circ1_rms
            circ2_flux_col[j], circ2_rms_col[j] = circ2_flux, circ2_rms
            circ3_flux_col[j], circ3_rms_col[j] = circ3_flux, circ3_rms

            ellipse_npix_col[j] = ellipse_npix
            circ1_npix_col[j] = circ1_npix
            circ2_npix_col[j] = circ2_npix
            circ3_npix_col[j] = circ3_npix

            annulus_median_col[j] = annulus_median
            annulus_rms_col[j] = annulus_rms

            catalog['snr_band' + str(band)][j] = peak_flux / annulus_rms
            snr_vals.append(peak_flux / annulus_rms)
            names.append(catalog['_idx'][j])

            # Secondary rejection
            rejected = 0
            lowest_flux = np.min(
                [ellipse_flux, circ1_flux, circ2_flux, circ3_flux])
            #if lowest_flux <= annulus_median*ellipse_npix or lowest_flux < 0:
            if lowest_flux < 0:
                catalog['rejected'][j] = 1
                n_rejected += 1
                rejected = 1
            rejects.append(rejected)
            pb.update()

        # Plot the grid of sources
        plot_grid(datacube, masks, rejects, snr_vals, names)
        plt.suptitle('region={}, band={}'.format(region, band))
        plt.show(block=False)

        # add columns to catalog
        catalog.add_columns([
            peak_flux_col,
            ellipse_flux_col,
            ellipse_rms_col,
            circ1_flux_col,
            circ1_rms_col,
            circ2_flux_col,
            circ2_rms_col,
            circ3_flux_col,
            circ3_rms_col,
        ])

    catalog.add_columns([
        ellipse_npix_col, circ1_npix_col, circ2_npix_col, circ3_npix_col,
        annulus_median_col, annulus_rms_col
    ])
    print("\n{} sources flagged for secondary rejection".format(n_rejected))

    # save catalog
    catalog = catalog[sorted(catalog.colnames)]
    catalog.write(filename.split('.dat')[0] + '_photometered.dat',
                  format='ascii')
    print("\nMaster catalog saved as '{}'".format(
        filename.split('.dat')[0] + '_photometered.dat'))
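
As in example #8, mask() and rms() come from the project's utilities, and so does apsum(). Judging only from how its return value is unpacked above, apsum presumably yields the aperture flux sum, an RMS, the peak value, the aperture mask, and the pixel count; a rough sketch under that assumption (reusing the mask/rms sketches from example #8):

def apsum(reg, cutout):
    # Hypothetical aperture summary: flux sum, RMS, peak, mask and pixel count.
    m = mask(reg, cutout)
    vals = cutout.data[m.astype(bool)]
    return vals.sum(), rms(vals), vals.max(), m, vals.size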
Code example #15
from utils import plot_grid, get_grid
from models import Model
from torch import optim
from tensorboardX import SummaryWriter
from PIL import Image
import torch

# Get train loader and val loader and test loader
train_loader = get_train_loader()
val_loader = get_val_loader()
test_loader = get_test_loader()

# Sanity-check a batch from the validation loader
sample = next(iter(val_loader))
imgs, lms = sample['image'], sample['landmarks']
plot_grid(imgs, lms)


# Loss function
def loss_func(pred, gt):
    batch_sz = pred.shape[0]
    diff = pred - gt.view(batch_sz, -1)
    nan_ind = (diff != diff)
    diff[nan_ind] = 0
    loss = diff.pow(2).sum() / (diff.numel() - nan_ind.sum())
    return loss
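
A quick sanity check of the masked loss with dummy tensors (shapes are illustrative only): entries that are NaN in the ground truth are zeroed and excluded from the denominator.

# Illustrative only: 2 samples, 2 (x, y) landmarks each, one landmark missing.
pred = torch.zeros(2, 4)
gt = torch.tensor([[1.0, 1.0, float('nan'), float('nan')],
                   [1.0, 1.0, 1.0, 1.0]]).view(2, 2, 2)
print(loss_func(pred, gt))  # six squared errors of 1.0 over six valid entries -> tensor(1.)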


# initialize the model
model = Model()
model.cuda()
Code example #16
File: train.py  Project: TanmDL/HebbianLearningThesis
def run_train_iter(config, iter_id):
    if config.CONFIG_FAMILY == P.CONFIG_FAMILY_HEBB:
        torch.set_grad_enabled(False)

    # Seed rng
    torch.manual_seed(iter_id)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Load datasets
    print("Preparing dataset manager...")
    dataManager = data.DataManager(config)
    print("Dataset manager ready!")
    print("Preparing training dataset...")
    train_set = dataManager.get_train()
    print("Training dataset ready!")
    print("Preparing validation dataset...")
    val_set = dataManager.get_val()
    print("Validation dataset ready!")

    # Prepare network model to be trained
    print("Preparing network...")
    pre_net, net = load_models(config, iter_id, testing=False)
    criterion = None
    optimizer = None
    scheduler = None
    if config.CONFIG_FAMILY == P.CONFIG_FAMILY_GDES:
        # Instantiate optimizer if we are going to train with gradient descent
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(net.parameters(),
                              lr=config.LEARNING_RATE,
                              momentum=config.MOMENTUM,
                              weight_decay=config.L2_PENALTY,
                              nesterov=True)
        scheduler = sched.MultiStepLR(optimizer,
                                      gamma=config.LR_DECAY,
                                      milestones=config.MILESTONES)
    print("Network ready!")

    # Train the network
    print("Starting training...")
    train_acc_data = []
    val_acc_data = []
    best_acc = 0.0
    best_epoch = 0
    start_time = time.time()
    for epoch in range(1, config.NUM_EPOCHS + 1):
        # Update LR scheduler
        if scheduler is not None: scheduler.step()

        # Print overall progress information at each epoch
        utils.print_train_progress(epoch, config.NUM_EPOCHS,
                                   time.time() - start_time, best_acc,
                                   best_epoch)

        # Training phase
        print("Training...")
        train_acc = train_pass(net, train_set, config, pre_net, criterion,
                               optimizer)
        print("Training accuracy: {:.2f}%".format(100 * train_acc))

        # Validation phase
        print("Validating...")
        val_acc = eval_pass(net, val_set, config, pre_net)
        print("Validation accuracy: {:.2f}%".format(100 * val_acc))

        # Update training statistics and saving plots
        train_acc_data += [train_acc]
        val_acc_data += [val_acc]
        utils.save_figure(train_acc_data, val_acc_data,
                          config.ACC_PLT_PATH[iter_id])

        # If validation accuracy has improved update best model
        if val_acc > best_acc:
            print("Top accuracy improved! Saving new best model...")
            best_acc = val_acc
            best_epoch = epoch
            utils.save_dict(net.state_dict(), config.MDL_PATH[iter_id])
            if hasattr(net, 'conv1') and net.input_shape == P.INPUT_SHAPE:
                utils.plot_grid(net.conv1.weight, config.KNL_PLT_PATH[iter_id])
            if hasattr(net, 'fc') and net.input_shape == P.INPUT_SHAPE:
                utils.plot_grid(net.fc.weight.view(-1, *P.INPUT_SHAPE),
                                config.KNL_PLT_PATH[iter_id])
            print("Model saved!")