Example #1
def plot_lat_vs_time(data, individually=False):
    # Collect each individual's series, dropping the ids (Python 3: items())
    tss = [s for _id, s in data.items()]
    if individually:
        for ts in tss:
            plot_series(np.array(ts), 2, 1)
    else:
        plot_series(tss, 2, 1, variable_length=True)
Example #2
def main(args):

    # Device Configuration #
    device = torch.device(
        f'cuda:{args.gpu_num}' if torch.cuda.is_available() else 'cpu')

    # Fix Seed for Reproducibility #
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Samples, Weights, CSV and Inference Paths #
    paths = [
        args.samples_path, args.weights_path, args.csv_path,
        args.inference_path
    ]
    for path in paths:
        make_dirs(path)

    # Prepare Data #
    data = pd.read_csv(args.data_path)[args.column]

    # Pre-process Data #
    scaler_1 = StandardScaler()
    scaler_2 = StandardScaler()
    preprocessed_data = pre_processing(data, scaler_1, scaler_2, args.constant,
                                       args.delta)

    train_X, train_Y, test_X, test_Y = prepare_data(data, preprocessed_data,
                                                    args)

    train_X = moving_windows(train_X, args.ts_dim)
    train_Y = moving_windows(train_Y, args.ts_dim)

    test_X = moving_windows(test_X, args.ts_dim)
    test_Y = moving_windows(test_Y, args.ts_dim)

    # Prepare Networks #
    if args.model == 'conv':
        D = ConvDiscriminator(args.ts_dim).to(device)
        G = ConvGenerator(args.latent_dim, args.ts_dim).to(device)

    elif args.model == 'lstm':
        D = LSTMDiscriminator(args.ts_dim).to(device)
        G = LSTMGenerator(args.latent_dim, args.ts_dim).to(device)

    else:
        raise NotImplementedError

    #########
    # Train #
    #########

    if args.mode == 'train':

        # Loss Function #
        if args.criterion == 'l2':
            criterion = nn.MSELoss()

        elif args.criterion == 'wgangp':
            pass

        else:
            raise NotImplementedError

        # Optimizers #
        if args.optim == 'sgd':
            D_optim = torch.optim.SGD(D.parameters(), lr=args.lr, momentum=0.9)
            G_optim = torch.optim.SGD(G.parameters(), lr=args.lr, momentum=0.9)

        elif args.optim == 'adam':
            D_optim = torch.optim.Adam(D.parameters(),
                                       lr=args.lr,
                                       betas=(0., 0.9))
            G_optim = torch.optim.Adam(G.parameters(),
                                       lr=args.lr,
                                       betas=(0., 0.9))

        else:
            raise NotImplementedError

        D_optim_scheduler = get_lr_scheduler(D_optim, args)
        G_optim_scheduler = get_lr_scheduler(G_optim, args)

        # Lists #
        D_losses, G_losses = list(), list()

        # Train #
        print(
            "Training Time Series GAN started with a total of {} epochs.".format(
                args.num_epochs))

        for epoch in range(args.num_epochs):

            #######################
            # Train Discriminator #
            #######################

            if args.criterion == 'l2':
                n_critics = 1
            elif args.criterion == 'wgangp':
                n_critics = 5

            for j in range(n_critics):
                series, start_dates = get_samples(train_X, train_Y,
                                                  args.batch_size)

                # Data Preparation #
                series = series.to(device)
                noise = torch.randn(args.batch_size, 1,
                                    args.latent_dim).to(device)

                # Adversarial Loss using Real Image #
                prob_real = D(series.float())

                if args.criterion == 'l2':
                    real_labels = torch.ones(prob_real.size()).to(device)
                    D_real_loss = criterion(prob_real, real_labels)

                elif args.criterion == 'wgangp':
                    D_real_loss = -torch.mean(prob_real)

                # Adversarial Loss using Fake Image #
                fake_series = G(noise)
                prob_fake = D(fake_series.detach())

                if args.criterion == 'l2':
                    fake_labels = torch.zeros(prob_fake.size()).to(device)
                    D_fake_loss = criterion(prob_fake, fake_labels)

                elif args.criterion == 'wgangp':
                    D_fake_loss = torch.mean(prob_fake)
                    D_gp_loss = args.lambda_gp * get_gradient_penalty(
                        D, series.float(), fake_series.float(), device)

                # Calculate Total Discriminator Loss #
                D_loss = D_fake_loss + D_real_loss

                if args.criterion == 'wgangp':
                    D_loss += D_gp_loss  # lambda_gp is already applied above

                # Back Propagation and Update #
                D_optim.zero_grad()
                D_loss.backward()
                D_optim.step()

            ###################
            # Train Generator #
            ###################

            # Adversarial Loss (sample fresh noise for the generator update) #
            noise = torch.randn(args.batch_size, 1,
                                args.latent_dim).to(device)
            fake_series = G(noise)
            prob_fake = D(fake_series)

            # Calculate Total Generator Loss #
            if args.criterion == 'l2':
                real_labels = torch.ones(prob_fake.size()).to(device)
                G_loss = criterion(prob_fake, real_labels)

            elif args.criterion == 'wgangp':
                G_loss = -torch.mean(prob_fake)

            # Back Propagation and Update #
            G_optim.zero_grad()
            G_loss.backward()
            G_optim.step()

            # Add items to Lists #
            D_losses.append(D_loss.item())
            G_losses.append(G_loss.item())

            # Adjust Learning Rate #
            D_optim_scheduler.step()
            G_optim_scheduler.step()

            # Print Statistics, Save Model Weights and Series #
            if (epoch + 1) % args.log_every == 0:

                # Print Statistics and Save Model #
                print("Epochs [{}/{}] | D Loss {:.4f} | G Loss {:.4f}".format(
                    epoch + 1, args.num_epochs, np.average(D_losses),
                    np.average(G_losses)))
                torch.save(
                    G.state_dict(),
                    os.path.join(
                        args.weights_path,
                        'TS_using{}_and_{}_Epoch_{}.pkl'.format(
                            G.__class__.__name__, args.criterion.upper(),
                            epoch + 1)))

                # Generate Samples and Save Plots and CSVs #
                series, fake_series = generate_fake_samples(
                    test_X, test_Y, G, scaler_1, scaler_2, args, device)
                plot_series(series, fake_series, G, epoch, args,
                            args.samples_path)
                make_csv(series, fake_series, G, epoch, args, args.csv_path)

    ########
    # Test #
    ########

    elif args.mode == 'test':

        # Load Model Weights #
        G.load_state_dict(
            torch.load(os.path.join(
                args.weights_path,
                'TS_using{}_and_{}_Epoch_{}.pkl'.format(
                    G.__class__.__name__, args.criterion.upper(),
                    args.num_epochs)),
                       map_location=device))

        # Lists #
        real, fake = list(), list()

        # Inference #
        for idx in range(0, test_X.shape[0], args.ts_dim):

            # Stop once the remaining data is shorter than the time dimension #
            end_ix = idx + args.ts_dim

            if end_ix > len(test_X) - 1:
                break

            # Prepare Data #
            test_data = test_X[idx, :]
            test_data = np.expand_dims(test_data, axis=0)
            test_data = np.expand_dims(test_data, axis=1)
            test_data = torch.from_numpy(test_data).to(device)

            start = test_Y[idx, 0]

            noise = torch.randn(args.val_batch_size, 1,
                                args.latent_dim).to(device)

            # Generate Fake Data #
            with torch.no_grad():
                fake_series = G(noise)

            # Convert to NumPy format for Saving #
            test_data = np.squeeze(test_data.cpu().numpy())
            fake_series = np.squeeze(fake_series.cpu().numpy())

            test_data = post_processing(test_data, start, scaler_1, scaler_2,
                                        args.delta)
            fake_series = post_processing(fake_series, start, scaler_1,
                                          scaler_2, args.delta)

            real += test_data.tolist()
            fake += fake_series.tolist()

        # Plot, Save to CSV file and Derive Metrics #
        plot_series(real, fake, G, args.num_epochs - 1, args,
                    args.inference_path)
        make_csv(real, fake, G, args.num_epochs - 1, args, args.inference_path)
        derive_metrics(real, fake, args)

    else:
        raise NotImplementedError
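
# Note: `get_gradient_penalty` used above comes from the surrounding project
# and is not shown here. A minimal sketch of the standard WGAN-GP penalty
# (Gulrajani et al., 2017), assuming the same (D, real, fake, device) call
# signature seen above; the project's actual implementation may differ.
import torch


def get_gradient_penalty(D, real, fake, device):
    # Random interpolation between real and fake samples
    alpha = torch.rand(real.size(0), 1, 1, device=device)
    interp = (alpha * real + (1. - alpha) * fake).requires_grad_(True)

    # Gradient of the critic output w.r.t. the interpolated input
    d_interp = D(interp)
    grads = torch.autograd.grad(outputs=d_interp,
                                inputs=interp,
                                grad_outputs=torch.ones_like(d_interp),
                                create_graph=True,
                                retain_graph=True)[0]

    # Penalize deviation of each sample's gradient norm from 1
    grads = grads.view(grads.size(0), -1)
    return ((grads.norm(2, dim=1) - 1.) ** 2).mean()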
Example #3
    def run_test(self):
        SAMPLES_PER_CLASS = 50
        N_CLASSES = 10
        TIME = 150
        BIN_SIZE = 10
        DELAY = 50
        DURATION = 10
        SPARSITY = 0.05
        CI_LVL = 0.95

        # Determine the output and spatio-temporal response to various patterns, including unknown classes
        for model in ["scratch", "trained"]:
            if model == "trained":  # Initially compute test statistics with model initialized from scratch, then do the same with trained model
                try:
                    self.network: Net = load(self.config.RESULT_FOLDER +
                                             "/model.pt")
                except FileNotFoundError as e:
                    print("No saved network model found.")
                    raise e
                # Direct network to GPU
                if P.GPU: self.network.to_gpu()
                self.stats_manager = utils.StatsManager(
                    self.network, self.config.CLASSES, self.config.ASSIGNMENTS)
            self.network.train(False)
            print("Testing " + model + " model...")

            for type in ["out", "st"]:
                if type == "out":
                    print("Computing output responses for various patterns")
                else:
                    print(
                        "Computing spatio-temporal responses for various patterns"
                    )
                unk = None
                for k in range(N_CLASSES + 1):
                    pattern_name = str(k) if k < N_CLASSES else "rnd"
                    print("Pattern: " + pattern_name)
                    if type == "out":
                        encoder = PoissonEncoder(time=self.config.TIME,
                                                 dt=self.config.DT)
                    else:
                        encoder = utils.CustomEncoder(TIME, DELAY, DURATION,
                                                      self.config.DT, SPARSITY)
                    if k < N_CLASSES:
                        dataset = self.data_manager.get_test(
                            [k], encoder, SAMPLES_PER_CLASS)
                        # Get next input sample.
                        input_enc = next(iter(dataset))["encoded_image"]
                    else:
                        dataset = None
                        # Random pattern: uniform-noise image, zeroed label part
                        norm = (self.config.INPT_NORM /
                                (.25 * self.config.INPT_SHAPE[1] *
                                 self.config.INPT_SHAPE[2])
                                if self.config.INPT_NORM is not None else 1.)
                        noise_img = torch.rand(SAMPLES_PER_CLASS,
                                               *self.config.INPT_SHAPE) * norm
                        labels = torch.zeros(SAMPLES_PER_CLASS,
                                             *self.config.LABEL_SHAPE)
                        input_enc = encoder(
                            torch.cat((noise_img, labels), dim=3) *
                            self.config.INTENSITY)
                    if P.GPU: input_enc = input_enc.cuda()
                    # Run the network on the input without labels
                    self.network.run(
                        inputs={"X": input_enc},
                        time=self.config.TIME if type == "out" else TIME)
                    # Update network activity monitoring
                    if type == "out":
                        res = self.stats_manager.get_class_scores()
                    else:
                        res = self.stats_manager.get_st_resp(
                            bin_size=BIN_SIZE)
                    if k not in self.config.CLASSES and k < N_CLASSES:
                        unk = res if unk is None else torch.cat(
                            (unk, res), dim=0)
                    # Reset network state
                    self.network.reset_state_variables()
                    # Save results
                    if type == "out":
                        mean = res.mean(dim=0)
                        std = res.std(dim=0)
                        count = res.size(0)
                        utils.plot_out_resp(
                            [mean], [std], [count], [pattern_name + " out"],
                            self.config.CLASSES, self.config.RESULT_FOLDER +
                            "/" + model + "/out_mean_" + pattern_name + ".png",
                            CI_LVL)
                        utils.plot_out_dist(
                            mean, std, self.config.CLASSES,
                            self.config.RESULT_FOLDER + "/" + model +
                            "/out_dist_" + pattern_name + ".png")
                    else:
                        utils.plot_st_resp(
                            [res.mean(dim=0)[:, :, [0, 3, 6, 9]]],
                            [pattern_name + " resp."], BIN_SIZE,
                            self.config.RESULT_FOLDER + "/" + model +
                            "/st_resp_" + pattern_name + ".png")
                        res = res.mean(dim=3).mean(dim=2)
                        utils.plot_series([res.mean(dim=0)], [res.std(dim=0)],
                                          [pattern_name + " resp."], BIN_SIZE,
                                          self.config.RESULT_FOLDER + "/" +
                                          model + "/time_resp_" +
                                          pattern_name + ".png", CI_LVL)
                print("Pattern: unk")
                if type == "out":
                    mean = unk.mean(dim=0)
                    std = unk.std(dim=0)
                    count = unk.size(0)
                    utils.plot_out_resp([mean], [std], [count], ["unk out"],
                                        self.config.CLASSES,
                                        self.config.RESULT_FOLDER + "/" +
                                        model + "/out_mean_unk.png", CI_LVL)
                    utils.plot_out_dist(
                        mean, std, self.config.CLASSES,
                        self.config.RESULT_FOLDER + "/" + model +
                        "/out_dist_unk.png")

                else:
                    utils.plot_st_resp([unk.mean(dim=0)[:, :, [0, 3, 6, 9]]],
                                       ["unk resp."], BIN_SIZE,
                                       self.config.RESULT_FOLDER + "/" +
                                       model + "/st_resp_unk.png")
                    unk = unk.mean(dim=3).mean(dim=2)
                    utils.plot_series([unk.mean(dim=0)], [unk.std(dim=0)],
                                      ["unk resp."], BIN_SIZE,
                                      self.config.RESULT_FOLDER + "/" + model +
                                      "/time_resp_unk.png", CI_LVL)

        # Plot kernels
        print("Plotting network kernels")
        connections = {
            "inpt": ("X", "Y"),
            "exc": ("Y", "Y"),
            "inh": ("Z", "Y")
        }
        lin_coord = self.network.coord_y_disc.view(
            -1) * self.config.GRID_SHAPE[2] + self.network.coord_x_disc.view(
                -1)
        knl_idx = [
            torch.nonzero(lin_coord == i)
            for i in range(self.config.GRID_SHAPE[1] *
                           self.config.GRID_SHAPE[2])
        ]
        knl_idx = [
            knl_idx[i][0] if len(knl_idx[i]) > 0 else None
            for i in range(len(knl_idx))
        ]
        for name, conn in connections.items():
            w = self.network.connections[conn].w.t()
            lin_coord = lin_coord.to(w.device)
            kernels = torch.zeros(self.config.GRID_SHAPE[1] *
                                  self.config.GRID_SHAPE[2],
                                  self.config.GRID_SHAPE[1],
                                  self.config.GRID_SHAPE[2],
                                  device=w.device)
            if name != "inpt":
                w = w.view(
                    self.config.NEURON_SHAPE[0] * self.config.NEURON_SHAPE[1],
                    self.config.NEURON_SHAPE[0] * self.config.NEURON_SHAPE[1])
                w_red = torch.zeros(
                    self.config.NEURON_SHAPE[0] * self.config.NEURON_SHAPE[1],
                    self.config.GRID_SHAPE[1] * self.config.GRID_SHAPE[2],
                    device=w.device)
                for i in range(w.size(1)):
                    w_red[:, lin_coord[i]] += w[:, i]
                w = w_red
            w = w.view(
                self.config.NEURON_SHAPE[0] * self.config.NEURON_SHAPE[1],
                self.config.GRID_SHAPE[1], self.config.GRID_SHAPE[2])
            for i in range(kernels.size(0)):
                if knl_idx[i] is not None:
                    kernels[i, :, :] = w[knl_idx[i], :, :]
            utils.plot_grid(kernels,
                            path=self.config.RESULT_FOLDER + "/weights_" +
                            name + ".png",
                            num_rows=self.config.GRID_SHAPE[1],
                            num_cols=self.config.GRID_SHAPE[2])

        # Calculate accuracy on test set
        print("Evaluating test accuracy...")
        self.eval_pass(self.tst_set, train=False)
        print("Test accuracy: " +
              str(100 * self.stats_manager.eval_accuracy[-1]) + "%")

        print("Finished!")
Example #4
window_size = 20
batch_size = 32
shuffle_buffer_size = 1000

# plot_series(time, series, title = "Original Data")
# plt.show()

print("\n  Please be patient! _()_  This might take some time. \n")

# forecast = []
# for time in range(len(series) - window_size):
#   forecast.append(model.predict(series[time:time + window_size][np.newaxis]))

# forecast = forecast[split_time-window_size:]
# results = np.array(forecast)[:, 0, 0]

rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]

plt.figure(figsize=(10, 6))

plot_series(time_valid, X_valid)
plot_series(
    time_valid,
    rnn_forecast,
    title="conv_lstm prediction",
    text="Conv1D(32)\nLSTM(32)\nLSTM(32)\nDense(1)\nloss = Huber\nOptimizer=SGD"
)
plt.show()

# plt.savefig('plotted_graphs/simple_model.png', bbox_inches='tight')
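
# `model_forecast` is defined elsewhere in this project. Below is a common
# implementation of this windowed-prediction helper, sketched as an assumption
# from the call above (series of shape (N, 1), output indexed [:, -1, 0]);
# the project's version may differ.
import tensorflow as tf


def model_forecast(model, series, window_size, batch_size=32):
    # Slide a length-`window_size` window over the series with step 1
    ds = tf.data.Dataset.from_tensor_slices(series)
    ds = ds.window(window_size, shift=1, drop_remainder=True)
    ds = ds.flat_map(lambda w: w.batch(window_size))
    ds = ds.batch(batch_size).prefetch(1)
    # One prediction per window
    return model.predict(ds)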
Example #5
    print()

    # filter the individuals to get just those that have overlap in data points
    # for at least NUM_YEARS years
    filtered_indivs = max_subset(indivs, SECONDS_PER_YEAR * NUM_YEARS)

    print('individuals with overlapping time series for at least %.f years' %
          NUM_YEARS)
    print(len(filtered_indivs))

    # get the ids of the filtered individuals
    ids = [i.id for i in filtered_indivs]

    # get the time series of the filtered individuals
    filtered_tss = [v for k, v in data.items() if k in ids]

    # interpolate for the filtered individual set
    normd_tss = normalize_time_series(filtered_tss)

    normd_tss = np.array(normd_tss)
    print('shape of all time series after normalization')
    print(normd_tss.shape)
    print()

    # plot lat vs time
    plot_series(normd_tss, 2, 1)

    # plot lat vs lon
    plot_series(normd_tss)
Example #6
    normd_tss = normalize_time_series(tss, downsample_factor=args.ds_factor)

    print('Shape of normalized data')
    print(normd_tss.shape)
    print()

    # extract just the lat and lon coordinates since this is all we want to use for DTW
    ptss = extract_lat_and_lon(normd_tss)

    print('Shape of data without time')
    print(ptss.shape)
    print()

    # instantiate a clusterer
    print('Instantiating a clusterer')
    clusterer = TsCluster(args.n_clusts, args.norm, stopping_threshold=args.st)
    print('done')
    print()

    print('Clustering')
    t0 = time()
    clusterer.k_means_clust(ptss, 100, verbose=True)
    dur = time() - t0
    print('done in %fs' % dur)
    print()

    centroids = clusterer.get_centroids()

    print('Plotting centroids')
    plot_series(centroids)
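
# `TsCluster.k_means_clust` clusters the lat/lon series with DTW as the
# distance measure (see the comment above `extract_lat_and_lon`). For
# reference, a minimal DTW distance in plain NumPy; this is an illustrative
# sketch, not the clusterer's implementation, which may add a warping-window
# constraint.
import numpy as np


def dtw_distance(a, b):
    n, m = len(a), len(b)
    cost = np.full((n + 1, m + 1), np.inf)
    cost[0, 0] = 0.0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            d = np.linalg.norm(np.asarray(a[i - 1]) - np.asarray(b[j - 1]))
            cost[i, j] = d + min(cost[i - 1, j],      # insertion
                                 cost[i, j - 1],      # deletion
                                 cost[i - 1, j - 1])  # match
    return cost[n, m]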
Example #7
def run_bio_exp():
    # For reproducibility
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Make inputs
    regL = torch.zeros(1, *GRID_SHAPE)
    regL[:, :, :, 0] = 1.
    regL[:, :, -1, :] = 1.
    upsL = torch.zeros(1, *GRID_SHAPE)
    upsL[:, :, 0, :] = 1.
    upsL[:, :, :, -1] = 1.
    patterns = [regL, upsL]
    ptn_names = ["regL", "upsL"]

    try:  # Try to load simulation results from file
        results = torch.load(RESULTS_PATH + "/results.pt")
        print("Found existing result file, results loaded")
    except FileNotFoundError:  # If file is not available, compute the results
        results = {
            "bef_tet":
            torch.empty(NUM_ITERS,
                        len(patterns),
                        NUM_BINS,
                        GRID_SHAPE[1],
                        GRID_SHAPE[2],
                        device=DEVICE),
            "aft_tet":
            torch.empty(NUM_ITERS,
                        len(patterns),
                        NUM_BINS,
                        GRID_SHAPE[1],
                        GRID_SHAPE[2],
                        device=DEVICE)
        }
        for seed in range(NUM_ITERS):
            print("####    CURRENT ITERATION: " + str(seed) + "    ####")
            torch.manual_seed(seed)
            trn_encoder = PoissonEncoder(time=TIME, dt=DT)
            if TST_ENCODER == ENC_POISSON:
                tst_encoder = PoissonEncoder(time=TIME, dt=DT)
            else:
                tst_encoder = CustomEncoder(time=TIME,
                                            delay=DELAY,
                                            duration=DURATION,
                                            dt=DT,
                                            sparsity=SPARSITY,
                                            noise_intensity=NOISE_INTENSITY)

            # Prepare network
            print("Preparing networks")
            net: Net = Net(inpt_shape=GRID_SHAPE,
                           neuron_shape=NEURON_SHAPE,
                           lbound=V_LB,
                           vrest=V_REST,
                           vreset=V_RESET,
                           vth=V_TH,
                           theta_w=THETA_W,
                           sigma=SIGMA,
                           conn_strength=CONN_STRENGTH,
                           sigma_lateral_exc=SIGMA_LATERAL_EXC,
                           exc_strength=EXC_STRENGTH,
                           sigma_lateral_inh=SIGMA_LATERAL_INH,
                           inh_strength=INH_STRENGTH,
                           dt=DT,
                           refrac=REFR,
                           tc_decay=V_DECAY,
                           tc_trace=TR_DECAY,
                           nu=LEARNING_RATE)
            # Direct networks to GPU
            if GPU: net = net.to_gpu()

            # Test the network before tetanization
            print("Testing network before tetanization")
            results["bef_tet"][seed, :, :, :, :] = bio_test(
                net, patterns, tst_encoder)

            # Tetanize network on lower-L input
            print("Tetanizing network")
            bio_trn(net, regL, trn_encoder)

            # Test the network after tetanization
            print("Testing network after tetanization")
            results["aft_tet"][seed, :, :, :, :] = bio_test(
                net, patterns, tst_encoder)

            print("Done\n")

        print("Saving results")
        os.makedirs(RESULTS_PATH, exist_ok=True)
        torch.save(results, RESULTS_PATH + "/results.pt")

    print("Saving plots")
    bio_mean_series = {
        "bef_tet":
        torch.tensor([
            [
                0., 0., 0., 0., 0., 0.4, 0.5, 0.4, 0.3, 0.2, 0.1, 0.05, 0.05,
                0.05, 0.05
            ],  # pattern regL
            [
                0., 0., 0., 0., 0., 0.4, 0.6, 0.5, 0.3, 0.2, 0.1, 0.05, 0.05,
                0.05, 0.05
            ]  # pattern upsL
        ]),
        "aft_tet":
        torch.tensor([
            [
                0., 0., 0., 0., 0., 0.7, 1.1, 0.8, 0.6, 0.5, 0.4, 0.4, 0.3,
                0.3, 0.2
            ],  # pattern regL
            [
                0., 0., 0., 0., 0., 0.4, 0.5, 0.4, 0.2, 0.1, 0.1, 0.1, 0.1,
                0.1, 0.1
            ]  # pattern upsL
        ]),
    }
    bio_std_series = {
        "bef_tet":
        torch.tensor([
            [
                0., 0., 0., 0., 0., 0.1, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05,
                0.05, 0.05, 0.05
            ],  # pattern regL
            [
                0., 0., 0., 0., 0., 0.1, 0.1, 0.1, 0.05, 0.05, 0.05, 0.05,
                0.05, 0.05, 0.05
            ]  # pattern upsL
        ]),
        "aft_tet":
        torch.tensor([
            [
                0., 0., 0., 0., 0., 0.15, 0.3, 0.2, 0.05, 0.15, 0.15, 0.15,
                0.15, 0.15, 0.1
            ],  # pattern regL
            [
                0., 0., 0., 0., 0., 0.1, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05,
                0.05, 0.05, 0.05
            ]  # pattern upsL
        ]),
    }
    decay_time = {"bef_tet": 20., "aft_tet": 50.}
    bio_mean = {
        "bef_tet": torch.tensor([5.6, 4.7]),
        "aft_tet": torch.tensor([6.4, 4.1])
    }
    bio_std = {
        "bef_tet": torch.tensor([0.8, 0.8]),
        "aft_tet": torch.tensor([0.7, 0.5])
    }
    bio_count = 4
    bio_st_resp = {
        "bef_tet":
        torch.tensor([
            [
                [0.5, 0.2, 0.2, 0.2],
                [0.5, 0., 0.5, 0.],
                [0.2, 1., 0.2, 0.2],
                [1., 0., 0., 0.],
                [0., 1., 0.5, 0.],
                [1., 0., 1., 0.2],
            ],  # pattern regL
            [
                [0.2, 1., 1., 1.],
                [0.5, 1., 0.2, 1.5],
                [0.5, 0.2, 0.2, 0.5],
                [0.5, 0.2, 0.5, 0.],
                [0.2, 0.2, 0., 0.],
                [0.2, 0., 0., 0.],
            ]  # pattern upsL
        ]),
        "aft_tet":
        torch.tensor([
            [
                [1.8, 1., 0., 0.5],
                [1.8, 1., 1.8, 0.],
                [1., 1.8, 0., 0.5],
                [1.8, 1., 0.1, 0.2],
                [1., 1.8, 1., 1.],
                [1.8, 0., 1.8, 0.5],
            ],  # pattern regL
            [
                [0., 1., 1., 0.5],
                [0.5, 0.5, 0., 1.],
                [0.5, 0.5, 0.2, 0.2],
                [0.2, 0.2, 0.5, 0.],
                [0., 0.2, 0., 0.],
                [0.2, 0., 0., 0.],
            ]  # pattern upsL
        ]),
    }
    for k in results.keys():
        out = None
        for i in range(len(patterns)):
            res = results[k][:, i, 0:OUT_BINS, :, :]
            bio = bio_st_resp[k][i].unsqueeze(0) * torch.cat(
                (torch.zeros(DELAY_BINS),
                 torch.exp(-torch.arange(OUT_BINS - DELAY_BINS) /
                           (TAU / BIN_SIZE))),
                dim=0).view(-1, 1, 1)
            utils.plot_st_resp([res[:, :, :, [0, 3, 6, 9]].mean(dim=0), bio],
                               ["Simul.", "Biol."], BIN_SIZE, RESULTS_PATH +
                               "/" + k + "/st_resp_" + ptn_names[i] + ".png")
            res = res.mean(dim=3).mean(dim=2)
            utils.plot_series([res.mean(dim=0), bio_mean_series[k][i]],
                              [res.std(dim=0), bio_std_series[k][i]],
                              ["Simul.", "Biol."], BIN_SIZE, RESULTS_PATH +
                              "/" + k + "/time_resp_" + ptn_names[i] + ".png",
                              CI_LVL)
            bin_count = int(decay_time[k]) // BIN_SIZE
            res = res[:, DELAY_BINS:DELAY_BINS + bin_count].mean(
                dim=1, keepdim=True) * BIN_SIZE
            out = res if out is None else torch.cat((out, res), dim=1)
        mean = out.mean(dim=0)
        std = out.std(dim=0)
        count = out.size(0)
        utils.plot_out_resp([mean, bio_mean[k]], [std, bio_std[k]],
                            [count, bio_count], ["Simul.", "Biol."], ptn_names,
                            RESULTS_PATH + "/" + k + "/out_mean.png", CI_LVL)
        utils.plot_out_dist(mean, std, ptn_names,
                            RESULTS_PATH + "/" + k + "/out_dist_simul.png")
        utils.plot_out_dist(bio_mean[k], bio_std[k], ptn_names,
                            RESULTS_PATH + "/" + k + "/out_dist_biol.png")

    print("Finished")
Example #8
import matplotlib.pyplot as plt
import tensorflow as tf

from model import simple_model, lstm_model, conv_lstm
from utils import windowed_dataset, plot_series, windowed_dataset1
from data_reader import load_data


series, time = load_data()

split_time = 3000
time_train = time[:split_time]
X_train = series[:split_time]
time_valid = time[split_time:]
X_valid = series[split_time:]

plot_series(time, series)
plt.show()

window_size = 30
batch_size = 32
shuffle_buffer_size = 1000

print(X_train.shape)
dataset = windowed_dataset1(X_train, window_size, batch_size, shuffle_buffer_size)
print(dataset)

# Build the model
model = conv_lstm(window_size)

lr_schedule = tf.keras.callbacks.LearningRateScheduler(lambda epoch: 1e-8 * 10 ** (epoch/20))
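
# `lr_schedule` is defined but never used in this snippet. A typical way to
# wire it into training for a learning-rate sweep (the epoch count and
# optimizer settings below are placeholders, not this project's values):
optimizer = tf.keras.optimizers.SGD(learning_rate=1e-8, momentum=0.9)
model.compile(loss=tf.keras.losses.Huber(), optimizer=optimizer,
              metrics=["mae"])
history = model.fit(dataset, epochs=100, callbacks=[lr_schedule])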
Example #9
    return errs


if __name__ == '__main__':

    # get a map of individual id to all its data, ordered by time
    indiv_to_ts = get_data_by_individual(args.fpath)

    print('Number of individuals')
    print(len(indiv_to_ts))
    print()

    if args.plot:
        # let's plot out Irma's latitude against time
        print('Plotting Irma\'s latitude over time')
        plot_series(indiv_to_ts['Irma'], 2, 1)
        print()

    rdr = None

    # optionally define a relative date range from the CLI arguments
    if args.rdr:
        start = RelativeDate(args.rdr[0], args.rdr[1])
        end = RelativeDate(args.rdr[2], args.rdr[3])
        rdr = RelativeDateRange(start, end)

    # get all the time series (splits)
    tsos = get_time_series(indiv_to_ts, rdr, args.it)
    print()

    print('Total number of time series')