Example #1
    def scalar_encoder(self, scalar, length, scale_range):
        """Encode a scalar from `scale_range` as a Gaussian bump over `length` buckets."""
        result = np.empty(length, dtype=float)

        # The bump is centred where the scalar lands after rescaling onto [0, length);
        # its spread is self.stddev expressed as a fraction of the vector length.
        for i in range(length):
            result[i] = helpers.gaussian(i, helpers.scale(scalar, length, scale_range), self.stddev * length)

        return result
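
Example #1 depends on a `helpers` module that is not shown. A minimal sketch of what the two calls might look like, assuming `helpers.gaussian` is an unnormalised Gaussian and `helpers.scale` linearly maps the scalar from `scale_range` onto the index range [0, length) (the real implementations may differ):

import numpy as np

def gaussian(x, mean, stddev):
    # Unnormalised Gaussian; only the relative shape of the bump matters here.
    return np.exp(-0.5 * ((x - mean) / stddev) ** 2)

def scale(scalar, length, scale_range):
    # Hypothetical: map `scalar` from [scale_range[0], scale_range[1]] onto [0, length).
    low, high = scale_range
    return (scalar - low) / (high - low) * length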
Example #2
def scaled_stick_value(gp, axis, invert, deadzone_pct=4):
    stick_value = scale(sdl2.SDL_JoystickGetAxis(gp['gp_object'], axis),
                        gp['stick_range'],
                        (-32768, 32768)
                        ) * invert

    deadzone_range = tuple(n * deadzone_pct / 100.0 for n in (-32768, 32768))

    if min(deadzone_range) < stick_value < max(deadzone_range):
        return 0
    else:
        return int(stick_value)
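
Examples #2 and #3 call a free-standing `scale(value, src_range, dst_range)` whose definition is not included. From the call sites it behaves like a plain linear remap; a minimal sketch under that assumption:

def scale(value, src_range, dst_range):
    # Linearly remap `value` from src_range = (src_min, src_max) to dst_range = (dst_min, dst_max).
    src_min, src_max = src_range
    dst_min, dst_max = dst_range
    return dst_min + (value - src_min) * (dst_max - dst_min) / (src_max - src_min)

In Example #2 this normalises the joystick's physical range (`gp['stick_range']`) to the full SDL axis range before the deadzone check is applied.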
Example #3
def scaled_gamepad_input(gamepad_input_key, output_range):
    """
    Read a value from the gamepad and scale it to the desired output range.

    :param gamepad_input_key: string
    :param output_range: tuple
    :return: int
    """
    if 'btn' in gamepad_input_key:
        scale_src = (0, 1)
    else:
        scale_src = STICK_RANGE
    if gamepad_input_key in gp_state:
        return int(scale(gp_state[gamepad_input_key], scale_src, output_range))
    else:
        return 0
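
A hypothetical call, assuming `gp_state` and `STICK_RANGE` are module-level state populated elsewhere and that the key name below exists in `gp_state`:

# Map the current reading of a stick axis onto a 0-180 degree servo range.
angle = scaled_gamepad_input('stick_x', (0, 180))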
Example #4
    def pretransform_dataset(self, dataset, reshape=False):
        max_vector_length = self.settings.getint("LSTM", "max_vector_length")

        dataset_padded = helpers.padding(dataset, max_vector_length)

        assert dataset_padded.shape[1] == max_vector_length

        if self.settings.getboolean("LSTM", "scale_data"):
            if reshape:
                dataset_padded = dataset_padded.reshape(-1, 1)

            self.scaler, datasetX = helpers.scale(dataset_padded)

            if reshape:
                datasetX = datasetX.reshape(1, -1)
        else:
            datasetX = dataset_padded

        return datasetX
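
Here `helpers.scale` follows a different contract: it fits a scaler and returns it together with the transformed data, so model outputs can later be inverted (Example #5 uses the same pattern). A plausible sketch, assuming scikit-learn's `MinMaxScaler`:

from sklearn.preprocessing import MinMaxScaler

def scale(data):
    # Fit a scaler on `data` and return (scaler, scaled_data) so the caller can
    # apply scaler.inverse_transform to predictions later.
    scaler = MinMaxScaler(feature_range=(0, 1))
    return scaler, scaler.fit_transform(data)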
Example #5
def run():
    logging.basicConfig(format='%(message)s',
                        level=logging.INFO,
                        filename='results.log',
                        filemode='w')

    # load the new file
    df = read_csv('pre-processed-in-24-hours.csv',
                  index_col=0,
                  parse_dates=True)

    for cell in [108 * 2 + 1]:
        for epoch in [1000]:  # full sweep: [1000, 2000, 3000, 4000, 5000]
            for batch_size in [500]:  # full sweep: [500, 1000, 1500]
                for n_input in [1, 2, 4, 8, 12, 16]:
                    for n_out in range(1, 9):
                        logging.info(
                            "Starting... cell {0}, epoch {1}, batch_size {2}, "
                            "input {3} and output {4}".format(
                                cell, epoch, batch_size, n_input, n_out))
                        try:
                            logging.info("Training {} {}".format(
                                n_input, n_out))
                            # transform data
                            scaler, data_scaled = scale(df.values)

                            train, test = split_dataset(df.values, n_out)
                            train_scaled, test_scaled = split_dataset(
                                data_scaled, n_out)

                            # restructure into window size
                            train_scaled, test_scaled = restructure_data_by_window(
                                train_scaled, test_scaled, n_out)
                            train, test = restructure_data_by_window(
                                train, test, n_out)

                            # fit model
                            model = build_model(train_scaled, n_input, n_out,
                                                cell, epoch, batch_size)

                            # history is a list by window size
                            history_scaled = [
                                x for x in train_scaled[:n_input, :, :]
                            ]
                            history = [x for x in train[:n_input, :, :]]

                            train_walk_foward_validation(
                                history, history_scaled, model, n_input,
                                scaler, train, train_scaled)

                            predictions_inverted = test_walk_foward_validation(
                                model, n_input, scaler, test, test_scaled,
                                train, train_scaled)

                            logging.info("predictions_inverted: {}".format(
                                predictions_inverted.shape))
                            logging.info("test {}".format(test.shape))

                            data = {
                                'predict':
                                predictions_inverted.reshape(
                                    predictions_inverted.shape[0] *
                                    predictions_inverted.shape[1]),
                                'real':
                                test[:, :, 0].reshape(test[:, :, 0].shape[0] *
                                                      test[:, :, 0].shape[1])
                            }

                            data['time'] = df.index[-data["predict"].shape[0]:]

                            df_plot = pandas.DataFrame.from_dict(data)
                            df_plot.to_csv('plot_results_{0}_{1}.csv'.format(
                                n_input, n_out))
                            plot_results(df_plot)
                            plot_scatter(df_plot)

                        except Exception as e:
                            logging.info(e)
Example #6
def train(train_dataloader_X, train_dataloader_Y, 
        test_dataloader_X, test_dataloader_Y, 
        device, n_epochs, balance,
        reconstruction_weight, identity_weight,
        print_every=1, checkpoint_every=10, sample_every=10):
    
    
    # keep track of losses over time
    losses = []

    # Get some fixed data from domains X and Y for sampling. These images are held
    # constant throughout training so we can inspect the model's progress on them.
    fixed_X = next(iter(test_dataloader_X))[0]
    fixed_Y = next(iter(test_dataloader_Y))[0]
    
    # scale to a range -1 to 1
    fixed_X = scale(fixed_X.to(device))
    fixed_Y = scale(fixed_Y.to(device))

    # batches per epoch
    iter_X = iter(train_dataloader_X)
    iter_Y = iter(train_dataloader_Y)
    batches_per_epoch = min(len(iter_X), len(iter_Y))

    for epoch in range(1, n_epochs+1):

        epoch_loss_d_x = 0
        epoch_loss_d_y = 0
        epoch_loss_g = 0

        for _ in range(batches_per_epoch):

            # move images to GPU or CPU depending on what is passed in the device parameter,
            # make sure to scale to a range -1 to 1
            images_X, _ = next(iter_X)
            images_X = scale(images_X.to(device))
            images_Y, _ = next(iter_Y)
            images_Y = scale(images_Y.to(device))


            # ============================================
            #            TRAIN THE DISCRIMINATORS
            # ============================================
            
            ##   First: D_X, real and fake loss components   ##

            # Compute the discriminator losses on real images
            d_x_out = D_X(images_X)
            d_x_loss_real = real_mse_loss(d_x_out)
            
            # Generate fake images that look like domain X based on real images in domain Y
            fake_x = G_YtoX(images_Y)

            # Compute the fake loss for D_X
            d_x_out = D_X(fake_x)
            d_x_loss_fake = fake_mse_loss(d_x_out)
            
            # Compute the total loss
            d_x_loss = d_x_loss_real + d_x_loss_fake
            

            
            ##   Second: D_Y, real and fake loss components   ##
            
            d_y_out = D_Y(images_Y) 
            d_y_real_loss = real_mse_loss(d_y_out)  # D_Y discriminator loss on a real Y image
            
            fake_y = G_XtoY(images_X) # generate fake Y image from the real X image
            d_y_out = D_Y(fake_y)
            d_y_fake_loss = fake_mse_loss(d_y_out) # compute D_y loss on a fake Y image
            
            d_y_loss = d_y_real_loss + d_y_fake_loss
            

            d_total_loss = d_x_loss + d_y_loss


            # =========================================
            #            TRAIN THE GENERATORS
            # =========================================

            ##    First: generate fake X images and reconstructed Y images    ##

            # Generate fake images that look like domain X based on real images in domain Y
            fake_x = G_YtoX(images_Y)

            # Compute the generator loss based on domain X
            d_out = D_X(fake_x)
            g_x_loss = real_mse_loss(d_out) # fake X should trick the D_x
            # TODO: consider using MSELoss or SmoothL1Loss (Huber loss)

            # Create a reconstructed y
            y_hat = G_XtoY(fake_x)
                    
            # Compute the cycle consistency loss (the reconstruction loss)
            rec_y_loss = cycle_consistency_loss(images_Y, y_hat, lambda_weight=reconstruction_weight)

            # Conversion from X to X should be an identity mapping
            it_x = G_YtoX(images_X)

            # Compute the identity mapping loss
            it_x_loss = identity_mapping_loss(images_X, it_x, weight=identity_weight)


            ##    Second: generate fake Y images and reconstructed X images    ##
            fake_y = G_XtoY(images_X)
            
            d_out = D_Y(fake_y)
            g_y_loss = real_mse_loss(d_out)  # fake Y should trick the D_y
            
            x_hat = G_YtoX(fake_y)
            
            rec_x_loss = cycle_consistency_loss(images_X, x_hat, lambda_weight=reconstruction_weight)

            it_y = G_XtoY(images_Y)

            it_y_loss = identity_mapping_loss(images_Y, it_y, weight=identity_weight)


            # Add up all generator and reconstructed losses 
            g_total_loss = g_x_loss + g_y_loss + rec_x_loss + rec_y_loss + it_x_loss + it_y_loss
            

            # Perform backprop
            
            if d_total_loss >= balance*g_total_loss:
                d_x_optimizer.zero_grad()
                d_x_loss.backward()
                d_x_optimizer.step()

                d_y_optimizer.zero_grad()
                d_y_loss.backward()
                d_y_optimizer.step()
            
            if g_total_loss >= balance*d_total_loss:
                g_optimizer.zero_grad()
                g_total_loss.backward()
                g_optimizer.step()

            # Gather statistics
            epoch_loss_d_x += d_x_loss.item()
            epoch_loss_d_y += d_y_loss.item()
            epoch_loss_g += g_total_loss.item()


        # Reset the iterators when epoch ends
        iter_X = iter(train_dataloader_X)
        iter_Y = iter(train_dataloader_Y)

        # Print the log info
        if epoch % print_every == 0 or epoch == n_epochs:
            # append real and fake discriminator losses and the generator loss
            losses.append((epoch_loss_d_x, epoch_loss_d_y, epoch_loss_g))
            print('Epoch [{:5d}/{:5d}] | d_X_loss: {:6.4f} | d_Y_loss: {:6.4f} | g_total_loss: {:6.4f}'.format(
                    epoch, n_epochs, epoch_loss_d_x, epoch_loss_d_y, epoch_loss_g))

            
        # Save the generated samples
        if epoch % sample_every == 0 or epoch == n_epochs:
            G_YtoX.eval() # set generators to eval mode for sample generation
            G_XtoY.eval()
            save_samples(epoch, fixed_Y, fixed_X, G_YtoX, G_XtoY, sample_dir='../samples')
            G_YtoX.train()
            G_XtoY.train()

        
        # Save the model parameters
        if epoch % checkpoint_every == 0 or epoch == n_epochs:
            save_checkpoint(G_XtoY, G_YtoX, D_X, D_Y, '../checkpoints')
            export_script_module(G_XtoY, '../artifacts', 'summer_to_winter_{:05d}.sm'.format(epoch))   
            export_script_module(G_YtoX, '../artifacts', 'winter_to_summer_{:05d}.sm'.format(epoch))             

    return losses
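
In Examples #6 and #8 `scale` operates on image tensors; the comments say it maps them to the range -1 to 1. Assuming the dataloaders yield tensors in [0, 1] (the usual ToTensor convention), a minimal sketch is:

def scale(x, feature_range=(-1, 1)):
    # Rescale an image tensor assumed to lie in [0, 1] to feature_range (default [-1, 1]).
    lo, hi = feature_range
    return x * (hi - lo) + lo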
Example #7
def eval_genomes(robot, genomes, config):
    for genome_id, genome in genomes:

        # Enable the synchronous mode
        vrep.simxSynchronous(settings.CLIENT_ID, True)

        if (vrep.simxStartSimulation(settings.CLIENT_ID,
                                     vrep.simx_opmode_oneshot) == -1):
            print('Failed to start the simulation\n')
            print('Program ended\n')
            return

        robot.chromosome = genome
        robot.wheel_speeds = np.array([])
        robot.sensor_activation = np.array([])
        robot.norm_wheel_speeds = np.array([])
        individual = robot

        start_position = None
        # collision detection initialization
        errorCode, collision_handle = vrep.simxGetCollisionHandle(
            settings.CLIENT_ID, 'robot_collision', vrep.simx_opmode_blocking)
        collision = False
        first_collision_check = True

        now = datetime.now()
        fitness_agg = np.array([])
        scaled_output = np.array([])
        net = neat.nn.FeedForwardNetwork.create(genome, config)

        id = uuid.uuid1()

        if start_position is None:
            start_position = individual.position

        distance_acc = 0.0
        previous = np.array(start_position)

        collisionDetected, collision = vrep.simxReadCollision(
            settings.CLIENT_ID, collision_handle, vrep.simx_opmode_streaming)

        while not collision and datetime.now() - now < timedelta(
                seconds=settings.RUNTIME):

            # The first simulation step waits for a trigger before being executed
            vrep.simxSynchronousTrigger(settings.CLIENT_ID)

            collisionDetected, collision = vrep.simxReadCollision(
                settings.CLIENT_ID, collision_handle, vrep.simx_opmode_buffer)

            individual.neuro_loop()

            # # Traveled distance calculation
            # current = np.array(individual.position)
            # distance = math.sqrt(((current[0] - previous[0])**2) + ((current[1] - previous[1])**2))
            # distance_acc += distance
            # previous = current

            output = net.activate(individual.sensor_activation)
            # normalize motor wheel speeds to [0.0, 2.0]
            scaled_output = np.array([scale(xi, 0.0, 2.0) for xi in output])

            if settings.DEBUG:
                individual.logger.info('Wheels {}'.format(scaled_output))

            individual.set_motors(*list(scaled_output))

            # After this call, the first simulation step is finished
            vrep.simxGetPingTime(settings.CLIENT_ID)

            # Fitness function, built from three features:
            # V - wheel center
            V = f_wheel_center(output[0], output[1])
            if settings.DEBUG:
                individual.logger.info('f_wheel_center {}'.format(V))

            # pleasure - straight movements
            pleasure = f_straight_movements(output[0], output[1])
            if settings.DEBUG:
                individual.logger.info(
                    'f_straight_movements {}'.format(pleasure))

            # pain - the closer to an obstacle, the more pain
            pain = f_pain(individual.sensor_activation)
            if settings.DEBUG: individual.logger.info('f_pain {}'.format(pain))

            # fitness_t at this time step
            fitness_t = V * pleasure * pain
            fitness_agg = np.append(fitness_agg, fitness_t)

            # dump individuals data
            if settings.SAVE_DATA:
                with open(settings.PATH_NE + str(id) + '_fitness.txt',
                          'a') as f:
                    f.write(
                        '{0!s},{1},{2},{3},{4},{5},{6},{7},{8}, {9}\n'.format(
                            id, scaled_output[0], scaled_output[1], output[0],
                            output[1], V, pleasure, pain, fitness_t,
                            distance_acc))

        # errorCode, distance = vrep.simxGetFloatSignal(CLIENT_ID, 'distance', vrep.simx_opmode_blocking)
        # aggregate fitness function - traveled distance
        # fitness_aff = [distance_acc]

        # behavioral fitness function
        fitness_bff = [np.sum(fitness_agg)]

        # tailored fitness function
        fitness = fitness_bff[0]  # * fitness_aff[0]

        # Now send some data to V-REP in a non-blocking fashion:
        vrep.simxAddStatusbarMessage(settings.CLIENT_ID,
                                     'fitness: {}'.format(fitness),
                                     vrep.simx_opmode_oneshot)

        # Before closing the connection to V-REP, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
        vrep.simxGetPingTime(settings.CLIENT_ID)

        print('%s fitness: %f | fitness_bff %f | fitness_aff %f' %
              (str(genome_id), fitness, fitness_bff[0],
               0.0))  # , fitness_aff[0]))

        if (vrep.simxStopSimulation(settings.CLIENT_ID,
                                    settings.OP_MODE) == -1):
            print('Failed to stop the simulation\n')
            print('Program ended\n')
            return

        time.sleep(1)
        genome.fitness = fitness
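
Example #7 uses yet another variant: a per-element `scale(xi, 0.0, 2.0)` that maps one network output onto the wheel-speed interval [0.0, 2.0]. If the NEAT outputs are sigmoid activations in [0, 1] (an assumption that depends on the NEAT config), the helper could simply be:

def scale(value, lower, upper):
    # Map a value assumed to lie in [0, 1] onto [lower, upper].
    return lower + value * (upper - lower)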
Example #8
def training_loop(G_XtoY,
                  G_YtoX,
                  D_X,
                  D_Y,
                  g_optimizer,
                  d_x_optimizer,
                  d_y_optimizer,
                  dataloader_X,
                  dataloader_Y,
                  test_dataloader_X,
                  test_dataloader_Y,
                  epochs=1000):

    print_every = 1

    # keep track of losses over time
    losses = []

    test_iter_X = iter(test_dataloader_X)
    test_iter_Y = iter(test_dataloader_Y)

    # Get some fixed data from domains X and Y for sampling. These images are held
    # constant throughout training so we can inspect the model's progress on them.
    fixed_X = next(test_iter_X)[0]
    fixed_Y = next(test_iter_Y)[0]
    fixed_X = scale(fixed_X)  # make sure to scale to a range -1 to 1
    fixed_Y = scale(fixed_Y)

    # batches per epoch
    iter_X = iter(dataloader_X)
    iter_Y = iter(dataloader_Y)
    batches_per_epoch = min(len(iter_X), len(iter_Y))

    for epoch in range(1, epochs + 1):

        # Reset the iterators once they have been exhausted (one batch is drawn per epoch)
        if epoch % batches_per_epoch == 0:
            iter_X = iter(dataloader_X)
            iter_Y = iter(dataloader_Y)

        images_X, _ = next(iter_X)
        images_X = scale(images_X)  # make sure to scale to a range -1 to 1

        images_Y, _ = next(iter_Y)
        images_Y = scale(images_Y)

        # move images to GPU if available (otherwise stay on CPU)
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        images_X = images_X.to(device)
        images_Y = images_Y.to(device)

        # ============================================
        #            TRAIN THE DISCRIMINATORS
        # ============================================

        ##   First: D_X, real and fake loss components   ##

        # Train with real images
        d_x_optimizer.zero_grad()

        # 1. Compute the discriminator losses on real images
        out_x = D_X(images_X)
        D_X_real_loss = real_mse_loss(out_x)

        # Train with fake images

        # 2. Generate fake images that look like domain X based on real images in domain Y
        fake_X = G_YtoX(images_Y)

        # 3. Compute the fake loss for D_X
        out_x = D_X(fake_X)
        D_X_fake_loss = fake_mse_loss(out_x)

        # 4. Compute the total loss and perform backprop
        d_x_loss = D_X_real_loss + D_X_fake_loss
        d_x_loss.backward()
        d_x_optimizer.step()

        ##   Second: D_Y, real and fake loss components   ##

        # Train with real images
        d_y_optimizer.zero_grad()

        # 1. Compute the discriminator losses on real images
        out_y = D_Y(images_Y)
        D_Y_real_loss = real_mse_loss(out_y)

        # Train with fake images

        # 2. Generate fake images that look like domain Y based on real images in domain X
        fake_Y = G_XtoY(images_X)

        # 3. Compute the fake loss for D_Y
        out_y = D_Y(fake_Y)
        D_Y_fake_loss = fake_mse_loss(out_y)

        # 4. Compute the total loss and perform backprop
        d_y_loss = D_Y_real_loss + D_Y_fake_loss
        d_y_loss.backward()
        d_y_optimizer.step()

        # =========================================
        #            TRAIN THE GENERATORS
        # =========================================

        ##    First: generate fake X images and reconstructed Y images    ##
        g_optimizer.zero_grad()

        # 1. Generate fake images that look like domain X based on real images in domain Y
        fake_X = G_YtoX(images_Y)

        # 2. Compute the generator loss based on domain X
        out_x = D_X(fake_X)
        g_YtoX_loss = real_mse_loss(out_x)

        # 3. Create a reconstructed y
        # 4. Compute the cycle consistency loss (the reconstruction loss)
        reconstructed_Y = G_XtoY(fake_X)
        reconstructed_y_loss = cycle_consistency_loss(images_Y,
                                                      reconstructed_Y,
                                                      lambda_weight=10)

        ##    Second: generate fake Y images and reconstructed X images    ##

        # 1. Generate fake images that look like domain Y based on real images in domain X
        fake_Y = G_XtoY(images_X)

        # 2. Compute the generator loss based on domain Y
        out_y = D_Y(fake_Y)
        g_XtoY_loss = real_mse_loss(out_y)

        # 3. Create a reconstructed x
        # 4. Compute the cycle consistency loss (the reconstruction loss)
        reconstructed_X = G_YtoX(fake_Y)
        reconstructed_x_loss = cycle_consistency_loss(images_X,
                                                      reconstructed_X,
                                                      lambda_weight=10)

        # 5. Add up all generator and reconstructed losses and perform backprop
        g_total_loss = g_YtoX_loss + g_XtoY_loss + reconstructed_y_loss + reconstructed_x_loss
        g_total_loss.backward()
        g_optimizer.step()

        # Print the log info
        if epoch % print_every == 0:
            # append real and fake discriminator losses and the generator loss
            losses.append(
                (d_x_loss.item(), d_y_loss.item(), g_total_loss.item()))
            print(
                'Epoch [{:5d}/{:5d}] | d_X_loss: {:6.4f} | d_Y_loss: {:6.4f} | g_total_loss: {:6.4f}'
                .format(epoch, epochs, d_x_loss.item(), d_y_loss.item(),
                        g_total_loss.item()))

        sample_every = 1
        # Save the generated samples
        if epoch % sample_every == 0:
            G_YtoX.eval()  # set generators to eval mode for sample generation
            G_XtoY.eval()
            save_samples(epoch,
                         fixed_Y,
                         fixed_X,
                         G_YtoX,
                         G_XtoY,
                         batch_size=16,
                         sample_dir='samples_cyclegan')
            G_YtoX.train()
            G_XtoY.train()

        # how often to save the model parameters
        checkpoint_every = 1000
        # Save the model parameters
        if epoch % checkpoint_every == 0:
            checkpoint(epoch, G_XtoY, G_YtoX, D_X, D_Y)

    return losses
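
Both CycleGAN examples also rely on loss helpers that are not shown. Sketches of the usual least-squares-GAN definitions, under the assumption that the discriminators output raw scores (Example #6 additionally uses `identity_mapping_loss`, which typically takes the same L1 form as the cycle loss with its own weight):

import torch

def real_mse_loss(D_out):
    # Distance of the discriminator output from the "real" target of 1.
    return torch.mean((D_out - 1) ** 2)

def fake_mse_loss(D_out):
    # Distance of the discriminator output from the "fake" target of 0.
    return torch.mean(D_out ** 2)

def cycle_consistency_loss(real_img, reconstructed_img, lambda_weight):
    # Weighted L1 distance between an image and its reconstruction.
    return lambda_weight * torch.mean(torch.abs(real_img - reconstructed_img))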