def draw_spec(self, cutoff=None, already_flipped=False):
    '''
    Draw the spectrogram of self.samples as loaded by load_samples(),
    cutting off at sample `cutoff` if provided.
    '''
    if cutoff is not None:
        freqs, times, spect = make_spect(self.samples[:cutoff],
                                         self.samples_per_seg,
                                         self.overlap_percent,
                                         self.sample_rate)
    else:
        freqs, times, spect = make_spect(self.samples,
                                         self.samples_per_seg,
                                         self.overlap_percent,
                                         self.sample_rate)

    # flip the frequency axis unless the caller says it is already oriented
    flip_axis = not already_flipped

    plotter(spect,
            self.ax,
            upside_down=flip_axis,
            title=self.files[self.position])

    self.fig.canvas.draw()
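make_spect is not shown in this snippet; the sketch below is a minimal, hypothetical version compatible with the call above, assuming it wraps scipy.signal.spectrogram and that overlap_percent is a fraction of the segment length.

import numpy as np
from scipy.signal import spectrogram

def make_spect(samples, samples_per_seg, overlap_percent, sample_rate):
    # scipy expects the overlap as a sample count, not a percentage
    noverlap = int(samples_per_seg * overlap_percent)
    freqs, times, spect = spectrogram(samples,
                                      fs=sample_rate,
                                      nperseg=samples_per_seg,
                                      noverlap=noverlap)
    # log-scale the power so quieter components remain visible in the plot
    return freqs, times, np.log(spect + 1e-12)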
Example #2
def test_plot_kline(self):
    k = plotter()
    data_path = "../quantist3/data/pool/"
    result_path = "../quantist3/data/result/"
    # result_name = 'plot_kline'
    data = pd.read_excel(data_path + 'sh2.xls')
    k.plot_kline(data, show_num=120)  # sh
Example #3
def test_semeion(trainer_type, testel, trainel):
    images_dir = "/Users/jian/Dropbox/AI_dropbox/progetto_2014/dummy_data_set/courier_digits_data_set/tiff_images_swidth"
    results_dir = "/Users/jian/Dropbox/AI_dropbox/progetto_2014/results/test_semeion" + "/" + trainer_type
    semeion_dir = "/Users/jian/Dropbox/AI_dropbox/progetto_2014/semeion_data_set/semeion.data"
    filename = results_dir + "/" + "tr" + str(trainel) + "_ts"+ str(testel)  + "_c" + str(corr_ratio) + "_e" + str(erase_ratio) + "_" + trainer_type + "." + filetype
    dim = [16, 16]

    image_dim = [dim[1], dim[0]]  # changing shape for images

    # Loading images data set
    temp_train = iM.collectimages(image_dim, images_dir, filter)

    # image conversion to 1 and -1 for Hopfield net
    for i in range(temp_train.shape[0]):
        temp = utl.image_converter(temp_train[i].reshape(dim))
        temp_train[i] = temp.flatten()

    train_input = np.zeros((trainel, dim[0] * dim[1]))
    for i in range(trainel):
        train_input[i] = temp_train[i]

    # training the net
    net = HopfieldNet.HopfieldNet(train_input, trainer_type, dim)

    # loading semeion data set
    test_set = np.zeros((testel, dim[0], dim[1]))
    for i in range(testel):
        test_set[i] = utl.semeion_loader(semeion_dir, i)

    # testing the net
    result_set = np.zeros((testel, dim[0], dim[1]))
    for i in range(testel):
        # test_set[i] = temp_train[i].reshape(dim)
        if corr_ratio != 0:
            test_set[i] = utl.corrupter(test_set[i], corr_ratio)
        if erase_ratio != 0:
            test_set[i] = utl.image_eraser(test_set[i], erase_ratio)
        result_set[i] = net.test(test_set[i])

    # Plotting and saving results
    utl.plotter(test_set, result_set, filename, plotbool, savebool)
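test_semeion (and test2 below) read several module-level settings that are not part of this snippet; the values here are hypothetical, shown only so the calls above make sense.

corr_ratio = 0.1      # fraction of pixels to corrupt before testing (hypothetical)
erase_ratio = 0.0     # fraction of each image to erase (hypothetical)
filter = "courier"    # filename filter passed to iM.collectimages (hypothetical)
filetype = "png"      # extension for the saved result figure (hypothetical)
plotbool = True       # whether utl.plotter shows the figures
savebool = True       # whether utl.plotter saves them to `filename`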
Example #4
def get_moves_old(game_map, turns, pid, training=False, graph=False):
    w = game_map.width
    h = game_map.height
    me = game_map.get_me()
    out = np.zeros((w, h))
    graph_o, all_axes, F, Z = get_gx(game_map, training)
    gradU, gradV = np.gradient(Z, axis=(0, 1))  # gradient of func
    for idx, ship in enumerate(me.all_ships()):
        sx, sy = int(ship.x), int(ship.y)
        sv = vector(ship)  # vector
        # Distance/Magnitude/Norm/Length = np.sqrt(x**2+y**2) = np.sqrt([x,y].dot([x,y]))
        dx, dy = gradU[sx][sy], gradV[sx][sy]  # unit vector of grad @ sx,sy
        gm = F.norm(dx, dy)
        u, v = -gm * dx, -gm * dy
        angle = degrees(np.arctan2(v, u)) % 360
        out[sx][sy] = angle
        is_last_ship = idx == len(me.all_ships()) - 1
        is_my_pid = pid == 0
        if is_last_ship and is_my_pid and graph:
            plotter(Z, sv, graph_o, turns, pid, w, h)
    graph_o.clear()
    return out
Example #5
def test2(trainer_type, testel, trainel):
    images_dir = "/Users/jian/Dropbox/AI_dropbox/progetto_2014/dummy_data_set/courier_digits_data_set/tiff_images_swidth"
    results_dir = "/Users/jian/Dropbox/AI_dropbox/progetto_2014/results/test_2" + "/" + trainer_type
    filename = results_dir + "/" + filter + "_" +  "tr" + str(trainel) + "_ts"+ str(testel)  + "_c" + str(corr_ratio) + "_e" + str(erase_ratio) + "_" + trainer_type + "." + filetype
    dim = [14, 9]  # in the form rows * cols
    # testel = 8  # elements for training
    #corruption_val = 5
    # trainers = ["hebbian","pseudoinv","storkey"]

    image_dim = [dim[1], dim[0]]  # changing shape for images

    # Loading images data set
    temp_train = iM.collectimages(image_dim, images_dir, filter)

    # image conversion to 1 and -1 for Hopfield net
    for i in range(temp_train.shape[0]):
        temp = utl.image_converter(temp_train[i].reshape(dim))
        temp_train[i] = temp.flatten()

    train_input = np.zeros((trainel, dim[0] * dim[1]))
    for i in range(trainel):
        train_input[i] = temp_train[i]

    # training the net
    net = HopfieldNet.HopfieldNet(train_input, trainer_type, dim)

    # testing the net
    test_set = np.zeros((testel, dim[0], dim[1]))
    result_set = np.zeros((testel, dim[0], dim[1]))
    for i in range(testel):
        test_set[i] = temp_train[i].reshape(dim)
        if corr_ratio != 0:
            test_set[i] = utl.corrupter(test_set[i], corr_ratio)
        if erase_ratio != 0:
            test_set[i] = utl.image_eraser(test_set[i], erase_ratio)
        result_set[i] = net.test(test_set[i])

    # Plotting and saving results
    utl.plotter(test_set, result_set, filename, plotbool, savebool)
Example #6
def get_moves(game_map, turns, pid, training=False, graph=False):
    w = game_map.width
    h = game_map.height
    me = game_map.get_me()
    ships = me.all_ships()
    out = {}
    if graph:
        grad_u, grad_v, graph_objs, Func, gridZ = get_gradient(
            ships, game_map, graph)
    else:
        grad_u, grad_v = get_gradient(ships, game_map, graph)
    for idx, ship in enumerate(ships):
        sx, sy = int(ship.x), int(ship.y)
        # Distance/Magnitude/Norm/Length = np.sqrt(x**2+y**2) = np.sqrt([x,y].dot([x,y]))
        sv = vector(ship)
        u, v = grad_u[idx], grad_v[idx]
        angle = degrees(np.arctan2(v, u)) % 360
        out[sx, sy] = angle
        is_last_ship = idx == len(ships) - 1
        is_my_pid = pid == 0
        if graph and is_last_ship and is_my_pid:
            plotter(gridZ, sv, graph_objs, turns, pid, w, h)
    if graph:
        # graph_objs only exists when plotting was requested
        graph_objs.clear()
    return out
    hyp2 = hyperParameters()

    hyp2.cov = np.array([0.0,0.0])
    hyp2.lik = np.array([np.log(0.1)])
    #vargout = min_wrapper(hyp2,gp,'CG',inffunc,[],covfunc,likfunc,x,y,None,None,True)
    #hyp2 = vargout[0]
    hyp2.cov = np.array([-0.993396880620537,0.685943441677086])
    hyp2.lik = np.array([-1.902546786026883])
    #vargout = gp(hyp2,inffunc,[],covfunc,likfunc,x,y,None,None,False)
    #print "nlml2 = ",vargout[0]

    vargout = gp(hyp2,inffunc,[],covfunc,likfunc,x,y,z)
    ym = vargout[0]; ys2 = vargout[1]
    m  = vargout[2]; s2  = vargout[3]
    ## Plot results
    plotter(z,ym,ys2,x,y,[-1.9, 1.9, -0.9, 3.9])
    ###########################################################
    '''covfunc = [ ['kernels.covSEiso'] ]
    hyp = hyperParameters()

    hyp.cov = np.array([0.0,0.0])
    hyp.mean = np.array([0.0,0.0])
    hyp.lik = np.array([np.log(0.1)])

    vargout = min_wrapper(hyp,gp,'BFGS',inffunc,meanfunc,covfunc,likfunc,x,y,None,None,True)
    hyp = vargout[0]
    hyp.mean = np.array([1.1919,1.4625])
    hyp.cov = np.array([-1.1513,-0.4559])
    hyp.lik = np.array([-1.9122])
    vargout = gp(hyp,inffunc,meanfunc,covfunc,likfunc,x,y,z)
    ym = vargout[2]; ys2 = vargout[3]
    def train(self):
        # initialize memory buffer
        buffer = ReplayBuffer(int(500000), self.batch_size, self.num_agents, 0)

        # use keep_awake to keep workspace from disconnecting
        for episode in range(self.number_of_episodes):
            env_info = self.env.reset(train_mode=True)[self.brain_name]

            agent_episode_rewards = [0, 0]

            for agent in self.maddpg.ddpg_agents:
                agent.noise.reset()

            for episode_t in range(self.max_episode_len):
                states = env_info.vector_observations
                states_t = to_tensor(states)

                with torch.no_grad():
                    action_ts = self.maddpg.act(states_t, noise=self.noise)
                    self.noise *= self.noise_reduction

                actions = torch.stack(action_ts).numpy()
                env_info = self.env.step(actions)[self.brain_name]

                next_states = env_info.vector_observations
                rewards = env_info.rewards
                dones = env_info.local_done

                for i in range(self.num_agents):
                    agent_episode_rewards[i] += rewards[i]

                full_state = np.concatenate(states)
                full_next_state = np.concatenate(next_states)

                buffer.add((states, full_state, actions, rewards, next_states, full_next_state, dones))

                # update once after every episode_per_update
                critic_losses = []
                actor_losses = []
                if len(buffer) > self.batch_size and episode % self.episode_per_update == 0:
                    for i in range(self.num_agents):
                        samples = buffer.sample()
                        cl, al = self.maddpg.update(samples, i)
                        critic_losses.append(cl)
                        actor_losses.append(al)
                    self.maddpg.update_targets()  # soft update the target network towards the actual networks

                if np.any(dones):
                    # if any of the agents are done break
                    break

            episode_reward = max(agent_episode_rewards)
            self.episode_rewards.append(episode_reward)
            self.last_100_episode_rewards.append(episode_reward)
            self.avg_rewards.append(np.mean(self.last_100_episode_rewards))
            # scores.append(episode_reward)
            print('\rEpisode {}\tAverage Score: {:.4f}\tScore: {:.4f}'.format(episode, self.avg_rewards[-1],
                                                                              episode_reward),
                  end="")

            if episode % self.print_period == 0:
                print('\rEpisode {}\tAverage Score: {:.4f}'.format(episode, self.avg_rewards[-1]))

            # saving successful model
            # training ends when the threshold value is reached.
            if self.avg_rewards[-1] >= self.threshold:
                save_dict_list = []

                for i in range(self.num_agents):
                    save_dict = {'actor_params': self.maddpg.ddpg_agents[i].actor.state_dict(),
                                 'actor_optim_params': self.maddpg.ddpg_agents[i].actor_optimizer.state_dict(),
                                 'critic_params': self.maddpg.ddpg_agents[i].critic.state_dict(),
                                 'critic_optim_params': self.maddpg.ddpg_agents[i].critic_optimizer.state_dict()}
                    save_dict_list.append(save_dict)

                    torch.save(save_dict_list, self.ckpt)

                raw_score_plotter(self.episode_rewards)
                plotter('Tennis', len(self.episode_rewards), self.avg_rewards, self.threshold)
                break
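The ReplayBuffer used above is defined elsewhere in the project; this is only a minimal sketch of the interface the loop assumes (constructor(buffer_size, batch_size, num_agents, seed), add(), sample(), and __len__()), not the author's actual implementation.

import random
from collections import deque

class ReplayBuffer:
    def __init__(self, buffer_size, batch_size, num_agents, seed):
        self.memory = deque(maxlen=buffer_size)
        self.batch_size = batch_size
        self.num_agents = num_agents
        random.seed(seed)

    def add(self, transition):
        # transition = (states, full_state, actions, rewards,
        #               next_states, full_next_state, dones)
        self.memory.append(transition)

    def sample(self):
        # uniform random minibatch of stored transitions
        return random.sample(self.memory, self.batch_size)

    def __len__(self):
        return len(self.memory)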
Example #9
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size:(i + 1) * batch_size] = features_batch
        labels[i * batch_size:(i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break
    return features, labels


train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)

train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))

model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_features,
                    train_labels,
                    epochs=30,
                    batch_size=20,
                    validation_data=(validation_features, validation_labels))
plotter(history.history)
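The listing above begins partway through extract_features; the missing opening might look roughly like the sketch below (assumptions: a VGG16 conv_base, 150x150 inputs, binary labels, and a directory layout readable by ImageDataGenerator; none of these are confirmed by the snippet).

import numpy as np
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator

conv_base = VGG16(weights='imagenet', include_top=False, input_shape=(150, 150, 3))
datagen = ImageDataGenerator(rescale=1. / 255)
batch_size = 20

def extract_features(directory, sample_count):
    features = np.zeros((sample_count, 4, 4, 512))  # shape of VGG16's last conv block
    labels = np.zeros((sample_count,))
    generator = datagen.flow_from_directory(directory,
                                            target_size=(150, 150),
                                            batch_size=batch_size,
                                            class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size:(i + 1) * batch_size] = features_batch
        labels[i * batch_size:(i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break
    return features, labels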
Example #10
def train(D, G, curr_lr, lr, n_epoch, beta1, beta2, bs):
    data_loader = CreateDataLoader(batchSize=bs)
    dataset = data_loader.load_data()

    # Create optimizers for the generators and discriminators
    optimizer_G = torch.optim.Adam(G.parameters(), lr=lr, betas=(beta1, beta2))
    optimizer_D = torch.optim.Adam(D.parameters(), lr=lr, betas=(beta1, beta2))

    res_d = []
    res_g = []
    total_steps = 0
    for epoch in range(1, n_epoch + 1):
        print("Running epoch:", epoch)
        start_time_epoch = time.time()
        sum_d = 0
        sum_g = 0
        for i, data in enumerate(dataset):
            total_steps += c.batchSize

            images_X = data['A']
            images_Y = data['B']

            # move images to GPU if available (otherwise stay on CPU)
            # train discriminator on real
            real_A = Variable(images_X.to(device))
            fake_B = G.forward(real_A)
            real_B = Variable(images_Y.to(device))

            # =======================Train the discriminator=======================#
            for iter in range(5):
                optimizer_D.zero_grad()

                # Real images
                D_real = D.forward(real_B)
                # Fake images
                D_fake = D.forward(fake_B.detach())
                # Gradient penalty
                gradient_penalty = calc_gradient_penalty(D, real_B.data, fake_B.data)
                d_loss = D_fake.mean() - D_real.mean() + gradient_penalty
                d_loss.backward(retain_graph=True)

                optimizer_D.step()
                if iter == 4:
                    sum_d += d_loss.item()

            #========================Train the generator===========================#
            optimizer_G.zero_grad()

            fake_B = G.forward(real_A)
            D_fake = D.forward(fake_B)
            g_loss = -D_fake.mean()
            g_contentloss = perceptual_loss(fake_B, real_B) * 100
            g_total_loss = g_loss + g_contentloss
            g_total_loss.backward()

            optimizer_G.step()

            # print PSNR & SSIM metrics at a certain frequency
            if total_steps % c.display_freq == 4:
                image_res = util.get_visuals(real_A, fake_B, real_B)
                psnr = metrics.PSNR(image_res['Restored_Train'], image_res['Sharp_Train'])
                print('PSNR on Train (at epoch {0}) = {1}'.format(epoch, psnr))
                ssim = metrics.SSIM_my(image_res['Restored_Train'], image_res['Sharp_Train'])
                print('SSIM_my on Train (at epoch {0}) = {1}'.format(epoch, ssim))

            # print losses & errors
            # if total_steps % c.print_freq == 0:
            #     err = util.get_errors(g_loss, g_contentloss, d_loss)
            #     t = (time.time() - start_time_epoch) / c.batchSize
            #     util.print_errors(epoch, i, err, t)

            # sum the loss over all the image
            sum_g += g_total_loss.item()

        # decaying learning rate
        if epoch > 150:
            lrd = 0.0001 / 150
            new_lr = curr_lr - lrd

            for param_group in optimizer_D.param_groups:
                param_group['lr'] = new_lr
            for param_group in optimizer_G.param_groups:
                param_group['lr'] = new_lr
            print('Update learning rate: %f -> %f' % (curr_lr, new_lr))
            curr_lr = new_lr

        # saving model after every 50 epochs
        if epoch % c.save_freq == 0:
            torch.save(G.state_dict(), 'model_G_' + str(epoch) + '.pt')
            torch.save(D.state_dict(), 'model_D_' + str(epoch) + '.pt')
        res_d.append(np.mean(sum_d))
        res_g.append(np.mean(sum_g))
        end_time_epoch = time.time()

        print("Time for epoch {0}: {1} | Disc loss: {2}  | Gen loss: {3}".format(epoch, (end_time_epoch - start_time_epoch), res_d[epoch-1], res_g[epoch-1]))

    torch.save(G.state_dict(), 'model_G_last.pt')
    torch.save(D.state_dict(), 'model_D_last.pt')
    print("Model Saved!")
    util.plotter(res_d, res_g)
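calc_gradient_penalty is referenced above but not shown; this is a sketch following the standard WGAN-GP penalty term, and the project's actual coefficient and interpolation details may differ.

import torch
from torch import autograd

def calc_gradient_penalty(D, real_data, fake_data, lambda_gp=10.0):
    batch_size = real_data.size(0)
    # random interpolation coefficient, one per sample, broadcast over C/H/W
    alpha = torch.rand(batch_size, 1, 1, 1, device=real_data.device)
    interpolates = (alpha * real_data + (1.0 - alpha) * fake_data).requires_grad_(True)
    d_interpolates = D(interpolates)
    gradients = autograd.grad(outputs=d_interpolates,
                              inputs=interpolates,
                              grad_outputs=torch.ones_like(d_interpolates),
                              create_graph=True,
                              retain_graph=True)[0]
    gradients = gradients.view(batch_size, -1)
    # penalize deviation of the gradient norm from 1
    return lambda_gp * ((gradients.norm(2, dim=1) - 1.0) ** 2).mean()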
Example #11
def fit(train_dataloader,
        val_dataloader,
        model,
        optimizer,
        loss_fn,
        n_epochs,
        post_classification=False,
        with_acc=False,
        with_scheduler=False):
    train_is_triplet = train_dataloader.dataset.is_triplet
    val_is_triplet = val_dataloader.dataset.is_triplet
    train_len = len(train_dataloader)
    val_len = len(val_dataloader)
    history = []
    for epoch in range(n_epochs):
        model.train()
        train_loss = 0
        val_loss = 0
        train_acc = 0
        val_acc = 0

        for inputs, labels in tqdm(train_dataloader, desc="Train iteration"):
            if with_scheduler:
                prev_sd = model.state_dict()

            optimizer.zero_grad()
            if train_is_triplet:
                anchor, pos, neg = inputs
                anchor = anchor.to(device='cuda')
                pos = pos.to(device='cuda')
                neg = neg.to(device='cuda')
                outputs = model(anchor, pos, neg)
                if not post_classification:
                    loss = loss_fn(*outputs)
                elif post_classification:
                    y0 = torch.tensor([0 for _ in range(len(outputs[0]))
                                       ]).to(device='cuda')
                    y1 = torch.tensor([1 for _ in range(len(outputs[1]))
                                       ]).to(device='cuda')
                    y = torch.cat((y0, y1))
                    outputs = torch.cat((outputs[0], outputs[1]))
                    loss = loss_fn(outputs, y)

            elif not train_is_triplet:
                w0, w1 = inputs
                w0 = w0.to(device='cuda')
                w1 = w1.to(device='cuda')
                labels = labels.to(device='cuda')

                outputs = model(w0, w1)
                loss = loss_fn(outputs, labels)

            loss.backward()
            optimizer.step()
            if with_acc:
                if post_classification:
                    train_acc += accuracy_score(outputs, y)
                elif not post_classification:
                    train_acc += accuracy_score(outputs, labels)

            train_loss += loss.item()
        model.eval()

        with torch.no_grad():
            for inputs, labels in tqdm(val_dataloader, desc="Val iteration"):
                if val_is_triplet:
                    anchor, pos, neg = inputs
                    anchor = anchor.to(device='cuda')
                    pos = pos.to(device='cuda')
                    neg = neg.to(device='cuda')
                    outputs = model(anchor, pos, neg)
                    loss = loss_fn(*outputs)
                else:
                    w0, w1 = inputs
                    w0 = w0.to(device='cuda')
                    w1 = w1.to(device='cuda')
                    labels = labels.to(device='cuda')
                    if not post_classification:
                        outputs = model(w0, w1)
                    elif post_classification:
                        outputs = model.classifire_it(w0, w1)
                    loss = loss_fn(outputs, labels)

                val_loss += loss.item()
                if with_acc:
                    val_acc += accuracy_score(outputs, labels)

        epoch_train_loss = train_loss / train_len
        epoch_val_loss = val_loss / val_len

        if with_acc:
            epoch_train_acc = train_acc / train_len
            epoch_val_acc = val_acc / val_len

        if with_scheduler:
            flag = False
            if epoch > 2:
                flag = True
                if epoch_val_loss > prev_loss:
                    model.load_state_dict(prev_sd)
                    epoch_val_loss = prev_loss
                    optimizer.param_groups[0]['lr'] /= 1.5
                    flag = False
            if (epoch <= 2) or (flag == True):
                history.append([
                    epoch, epoch_train_loss, epoch_val_loss, epoch_train_acc,
                    epoch_val_acc
                ])
                flag = False
            prev_loss = epoch_val_loss

        elif not with_scheduler:
            if with_acc:
                history.append([
                    epoch, epoch_train_loss, epoch_val_loss, epoch_train_acc,
                    epoch_val_acc
                ])
            else:
                history.append([epoch, epoch_train_loss, epoch_val_loss])

        plotter(history)
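The accuracy_score calls above receive GPU tensors of raw network outputs, so they presumably refer to a project helper rather than sklearn's accuracy_score; a hypothetical version consistent with that usage is sketched here.

import torch

def accuracy_score(outputs: torch.Tensor, labels: torch.Tensor) -> float:
    # assumes `outputs` are per-class scores and `labels` are integer class ids
    preds = outputs.argmax(dim=1)
    return (preds == labels).float().mean().item()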
def train(config_file):

    with open(config_file) as stream:
        config_data = yaml.safe_load(stream)

    use_only_specified_gpu(config_data['data_creation_parameters']['GPU_id'])

    base_model_type = config_data['training_parameters']['base_type']

    print("Reading dataset...")
    reader = HMDBDataReader(
        dataset_directory=config_data['dataset_save_dir'],
        batch_size=config_data['training_parameters']['batch_size'],
        sequence_len=config_data['data_creation_parameters']['sequence_len'],
        base_type=base_model_type)

    train_ds, _, val_ds = reader.get_datasets_sequence()

    if bool(config_data['data_creation_parameters']['display_training_data']):
        print("Displaying sequence...")
        reader.display_sequences_train()

    ##### CHOOSING THE BASE MODEL #####

    if base_model_type == 'VGG':
        base_model = tf.keras.applications.VGG16(include_top=False,
                                                 weights='imagenet')
        feature_map_size = 7
        image_input_size = 224
        filter_no = 512

    else:
        raise ValueError("No proper base model chosen for training!")

    ##### CHOOSING THE ARCHITECTURE #####

    units_first_lstm = config_data['training_parameters']['lstm_parameters'][0]

    if config_data['training_parameters']['model_type'] == 'ALSTM':

        print('Training ALSTM')

        classifier = ALSTM(
            base_model=base_model,
            use_dropout=bool(
                config_data['training_parameters']['use_dropout']),
            train_base=bool(config_data['training_parameters']['train_base']),
            units_first_lstm=units_first_lstm,
            no_classes=config_data['training_parameters']['no_classes'],
            feature_map_size=feature_map_size)

    elif config_data['training_parameters']['model_type'] == 'ConvALSTM':

        print('Training ConvALSTM')

        classifier = ConvALSTM(
            base_model=base_model,
            use_dropout=bool(
                config_data['training_parameters']['use_dropout']),
            train_base=bool(config_data['training_parameters']['train_base']),
            units_first_lstm=units_first_lstm,
            no_classes=config_data['training_parameters']['no_classes'],
            feature_map_size=feature_map_size)
    else:
        raise ValueError("No proper model chosen for training.")

    build_attention_network(
        classifier, config_data['training_parameters']['batch_size'],
        config_data['data_creation_parameters']['sequence_len'],
        units_first_lstm, config_data['training_parameters']['model_type'],
        feature_map_size, image_input_size, filter_no)

    ##### START TRAINING #####

    trainer = AttentionTrainer(
        network=classifier,
        network_name=config_data['training_parameters']['model_type'],
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
        loss_object=tf.keras.losses.SparseCategoricalCrossentropy(),
        loss_object_attention_pen=tf.keras.losses.MeanSquaredError(),
        train_loss=tf.keras.metrics.Mean(name='train_loss'),
        train_accuracy=tf.keras.metrics.SparseCategoricalAccuracy(
            name='train_accuracy'),
        train_precision=tf.keras.metrics.Precision(name='train_precision'),
        train_recall=tf.keras.metrics.Recall(name='train_recall'),
        val_loss=tf.keras.metrics.Mean(name='val_loss'),
        val_accuracy=tf.keras.metrics.SparseCategoricalAccuracy(
            name='val_accuracy'),
        val_precision=tf.keras.metrics.Precision(name='val_precision'),
        val_recall=tf.keras.metrics.Recall(name='val_recall'),
        test_loss=tf.keras.metrics.Mean(name='test_loss'),
        test_accuracy=tf.keras.metrics.SparseCategoricalAccuracy(
            name='test_accuracy'),
        test_precision=tf.keras.metrics.Precision(name='test_precision'),
        test_recall=tf.keras.metrics.Recall(name='test_recall'),
        batch_size=config_data['training_parameters']['batch_size'],
        penalty_coeff=config_data['training_parameters']['penalty_coeff'],
        weight_decay=config_data['training_parameters']['weight_decay'])

    training_data = []

    for epoch in range(config_data['training_parameters']['epochs']):
        trainer.train_loss.reset_states()
        trainer.train_accuracy.reset_states()
        trainer.train_precision.reset_states()
        trainer.train_recall.reset_states()
        trainer.val_loss.reset_states()
        trainer.val_accuracy.reset_states()
        trainer.val_precision.reset_states()
        trainer.val_recall.reset_states()

        current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

        train_log_dir = os.path.join(
            config_data['model_save_dir'],
            'logs/gradient_tape/') + current_time + '/train'
        val_log_dir = os.path.join(
            config_data['model_save_dir'],
            'logs/gradient_tape/') + current_time + '/val'
        train_summary_writer = tf.summary.create_file_writer(train_log_dir)
        val_summary_writer = tf.summary.create_file_writer(val_log_dir)

        print('Training...')
        for (batch, (image, labels)) in enumerate(train_ds):
            if image.shape[0] != config_data['training_parameters'][
                    'batch_size']:
                print('Invalid batch size, skipped...')
            else:
                trainer.train_step(image, labels)

            with train_summary_writer.as_default():
                tf.summary.scalar('train loss',
                                  trainer.train_loss.result(),
                                  step=batch)
                tf.summary.scalar('train accuracy',
                                  trainer.train_accuracy.result(),
                                  step=batch)

        print('Validating...')
        for (batch, (image_val, labels_val)) in enumerate(val_ds):
            if image_val.shape[0] != config_data['training_parameters'][
                    'batch_size']:
                print('Invalid batch size, skipped...')
            else:
                trainer.val_step(image_val, labels_val)

            with val_summary_writer.as_default():
                tf.summary.scalar('val loss',
                                  trainer.val_loss.result(),
                                  step=batch)
                tf.summary.scalar('val accuracy',
                                  trainer.val_accuracy.result(),
                                  step=batch)

        epoch_summary = (epoch + 1, trainer.train_loss.result(),
                         trainer.train_accuracy.result(),
                         f1_score(trainer.train_precision.result(),
                                  trainer.train_recall.result()),
                         trainer.val_loss.result(),
                         trainer.val_accuracy.result(),
                         f1_score(trainer.val_precision.result(),
                                  trainer.val_recall.result()))

        training_data.append(epoch_summary)

        template = 'Epoch {}, Loss: {}, Accuracy:{}, F1 Score: {}, Val Loss: {}, Val Acc: {}, Val F1 Score: {}'

        print(
            template.format(
                epoch_summary[0],
                epoch_summary[1],
                epoch_summary[2],
                epoch_summary[3],
                epoch_summary[4],
                epoch_summary[5],
                epoch_summary[6],
            ))

        model_savedir = os.path.join(
            config_data['model_save_dir'],
            config_data['training_parameters']['model_type'])
        if not os.path.exists(model_savedir):
            os.mkdir(model_savedir)

        plotter(training_data, model_savedir)
        with io.open(os.path.join(model_savedir, '{}'.format(config_file)),
                     'w',
                     encoding='utf8') as outfile:
            yaml.dump(config_data,
                      outfile,
                      default_flow_style=False,
                      allow_unicode=True)

        trainer.save_weights(epoch, model_savedir)
        print("Weights have been saved. Epoch {} done!".format(
            epoch_summary[0]))
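f1_score above is called with a precision and a recall value, which does not match sklearn's signature, so it is presumably a small project helper; a sketch under that assumption.

def f1_score(precision, recall, eps=1e-7):
    # harmonic mean of precision and recall; eps guards against division by zero
    return 2.0 * precision * recall / (precision + recall + eps)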
Example #13
     this_L2 *= wtl2
     this_DGz /= opt.update_measures_plots
     this_Dx /= opt.update_measures_plots
     this_Adv /= opt.update_measures_plots
     this_L2 /= opt.update_measures_plots
     this_G_tot /= opt.update_measures_plots
     this_D_tot /= opt.update_measures_plots
     
     D_G_zs.append(this_DGz)
     D_xs.append(this_Dx)
     Advs.append(this_Adv)
     L2s.append(this_L2)
     G_tots.append(this_G_tot)
     D_tots.append(this_D_tot)
     
     plotter(D_G_zs, D_xs, Advs, L2s, G_tots, D_tots, (len(dataloader) / opt.update_measures_plots), PATHS["plots"])
     
     this_DGz = 0
     this_Dx = 0
     this_Adv = 0
     this_L2 = 0
     this_G_tot = 0
     this_D_tot = 0
     step_counter = 0
 
 if i % opt.update_train_img == 0:
     if not opt.jointD:
         recon_image = input_cropped.clone()
         recon_image.data[:, :,
         int(opt.imageSize / 2 - opt.patchSize / 2):int(opt.imageSize / 2 + opt.patchSize / 2),
         int(opt.imageSize / 2 - opt.patchSize / 2):int(opt.imageSize / 2 + opt.patchSize / 2)] = fake.data
    stacked_x = y_pred.reshape(
        (y_pred.shape[0], y_pred.shape[1] * y_pred.shape[2]))
    # split stacked samples to train and test
    stacked_x_train, stacked_x_test,\
        stacked_y_train, stacked_y_test = train_test_split(stacked_x, y_test, train_size=0.75, random_state=0)
    # train the same MLP against stacked data
    stacked_model, stacked_history = get_simple_mlp(stacked_x_train,
                                                    stacked_y_train,
                                                    dimensions=n * classes,
                                                    classes=classes,
                                                    epochs=500)
    stacked_loss, stacked_acc = stacked_model.evaluate(stacked_x_test,
                                                       stacked_y_test,
                                                       verbose=0)
    print('Stacked accuracy: %.3f' % stacked_acc)
    print('Stacked loss: %.3f' % stacked_loss)
    plotter(stacked_history.history)

    etc = ExtraTreesClassifier(n_jobs=-1)
    etc.fit(stacked_x_train, stacked_y_train)
    etc_pred = etc.predict(stacked_x_test)
    etc_acc_score = accuracy_score(stacked_y_test, etc_pred)
    print('ExtraTreesClassifier stacked accuracy score: %.3f' % etc_acc_score)

    rfc = RandomForestClassifier(n_jobs=-1)
    rfc.fit(stacked_x_train, stacked_y_train)
    rfc_pred = rfc.predict(stacked_x_test)
    rfc_acc_score = accuracy_score(stacked_y_test, rfc_pred)
    print('RandomForestClassifier stacked accuracy score: %.3f' %
          rfc_acc_score)
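get_simple_mlp is not shown in this snippet; a hedged sketch of a compatible helper, assuming a Keras classifier trained on one-hot labels and returning (model, history) as used above.

from keras import models, layers

def get_simple_mlp(x_train, y_train, dimensions, classes, epochs=500):
    model = models.Sequential([
        layers.Dense(64, activation='relu', input_dim=dimensions),
        layers.Dense(classes, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=epochs, verbose=0)
    return model, history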
Example #15
st.write(
    '''In this challenge, you are asked to predict whether a user will churn after his/her subscription expires. Specifically, we want to forecast whether a user makes a new service subscription transaction within 30 days after the current membership expiration date.'''
)

col1 = st.beta_columns(1)

# KAPLAN MEIER CURVES
drop_cols = [
    'customer_id', 'bd', 'city', 'reg_month', 'tx_last_date', 'mem_end_date',
    'latest_actual_amount_paid', 'latest_payment_method_id', 'avg_tot_secs',
    'avg_num_unq', 'duration'
]
st.title('Kaplan-Meier Curves')
option = st.selectbox('', [x for x in df.columns if x not in drop_cols])

plt = plotter(df, option, DURATION, EVENT, CategoricalDtype)
KM_plot = st.pyplot(plt)

st.title("Model Summary")
st.write(model.summary)

# COX PROPORTIONAL HAZARDS SUMMARY
#from lifelines import CoxPHFitter
#rossi= load_rossi()

#st.title('Regression Model Summary')
#cph = CoxPHFitter()
#cph.fit(df, duration_col=DURATION, event_col=EVENT)

#st.write("## Coefficients")
#cols = ['coef','exp(coef)', 'exp(coef) lower 95%', 'exp(coef) upper 95%', 'p']
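The plotter called earlier in this snippet takes (df, option, DURATION, EVENT, CategoricalDtype); below is a hedged sketch of a compatible Kaplan-Meier helper, assuming lifelines is available and that option names a column of df to stratify by.

import matplotlib.pyplot as plt
from lifelines import KaplanMeierFitter

def plotter(df, option, duration_col, event_col, CategoricalDtype=None):
    fig, ax = plt.subplots()
    kmf = KaplanMeierFitter()
    # one survival curve per category of the selected column
    for value in df[option].dropna().unique():
        mask = df[option] == value
        kmf.fit(df.loc[mask, duration_col], df.loc[mask, event_col], label=str(value))
        kmf.plot_survival_function(ax=ax)
    ax.set_title('Kaplan-Meier curves by {}'.format(option))
    return plt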
            four_cliques_graph_noise=four_cliques_graph_noise,
            dim_feature=dim_feature,
            num_clusters=num_clusters)
    # identify cluster data
    if cluster_to_idx and idx_to_cluster:
        cluster_data = (cluster_to_idx, idx_to_cluster)
    else:
        cluster_data = None

    # Load a specific agent
    agent = load_agent(algorithm_name=algorithm_name,
                       dim_feature=dim_feature,
                       alpha=alpha,
                       graph=network,
                       cluster_data=cluster_data)
    # the dummy agent for normalization
    agent_normalized = load_agent(algorithm_name='dummy',
                                  dim_feature=dim_feature,
                                  alpha=alpha,
                                  graph=network,
                                  cluster_data=cluster_data)

    # Run the experiment
    regrets = runner(agent=agent,
                     agent_normalized=agent_normalized,
                     user_contexts=user_contexts,
                     time_steps=time_steps)

    # Plot the figure and write the result
    plotter(results=regrets, output_filename=output_filename)
Example #17
    ## SET (hyper)parameters
    hyp = hyperParameters()

    ## SET (hyper)parameters for covariance and mean
    hyp.cov = np.array([np.log(67.), np.log(66.), np.log(1.3), np.log(1.0), np.log(2.4), np.log(90.), np.log(2.4), \
                np.log(1.2), np.log(0.66), np.log(0.78), np.log(1.6/12.), np.log(0.18), np.log(0.19)])
    hyp.mean = np.array([])

    sn = 0.1
    hyp.lik = np.array([np.log(sn)])

    #_________________________________
    # STANDARD GP:
    ### TEST POINTS
    xs = np.arange(2004+1./24.,2024-1./24.,1./12.)
    xs = xs.reshape(len(xs),1)

    vargout = gp(hyp,inffunc,meanfunc,covfunc,likfunc,x,y,xs)
    ym = vargout[0]; ys2 = vargout[1]
    m  = vargout[2]; s2  = vargout[3]
    plotter(xs,ym,ys2,x,y)
    
    #vargout = min_wrapper(hyp,gp,'CG',inffunc,meanfunc,covfunc,likfunc,x,y,None,None,True)
    #hyp = vargout[0]
    #vargout = gp(hyp,inffunc,meanfunc,covfunc,likfunc,x,y,xs)
    #ym = vargout[0]; ys2 = vargout[1]
    #m  = vargout[2]; s2  = vargout[3]
    #plotter(xs,ym,ys2,x,y)