def test_display_info(self):
    res = utils.send_req_package("atom")
    text = utils.display_info(res)
    self.assertIsInstance(text, str)
    res = utils.send_req_package("atom")
    text = utils.display_info(res, nightly=True)
    self.assertIsInstance(text, str)
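The test only checks that utils.display_info renders the package response to a string, with and without the nightly flag. A minimal sketch of a function satisfying that contract is given below; the field names ("name", "description") and the formatting are assumptions, not the actual utils implementation.

def display_info(res, nightly=False):
    # Render the package-metadata dict returned by send_req_package as one string.
    name = res.get("name", "unknown")
    description = res.get("description", "")
    channel = "nightly" if nightly else "stable"
    return "{} [{}]: {}".format(name, channel, description)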
    def reward_function(self,
                        states_list,
                        timestep=0,
                        print_states=True,
                        print_additionnal_info=True):
        ########## WORK NEEDED #############
        ### You need to shape the reward ###
        ####################################
        """
        Available information:
        x : horizontal position
        y : vertical position
        angle : angle relative to the vertical (negative = right, positive = left)
        first_leg_contact : Left leg touches ground
        second_leg_contact : Right leg touches ground
        throttle : Throttle intensity
        gimbal : Gimbal angle relative to the rocket axis
        velocity_x : horizontal velocity (negative : going Left, positive : going Right)
        velocity_y : vertical velocity (negative : going Down, positive : going Up)
        angular_velocity : angular velocity (negative : turning anti-clockwise, positive : turning clockwise)
        distance : distance from the center of the ship
        velocity : norm of the velocity vector (velocity_x,velocity_y)
        landed : both legs touching the ground
        landed_full : both legs touching the ground for one second (60 frames)
        states : dictionary containing all variables in the state vector. For display purposes
        additionnal_information : dictionary containing additional information. For display purposes

        **Hints**
        Be careful with the sign of the different variables

        Go on and shape the reward !
        """
        # states information extraction
        (
            x,
            y,
            angle,
            first_leg_contact,
            second_leg_contact,
            throttle,
            gimbal,
            velocity_x,
            velocity_y,
            angular_velocity,
            distance,
            velocity,
            landed,
            landed_full,
            states,
            additionnal_information,
        ) = info_extractor(states_list, self.env)

        ######## REWARD SHAPING ###########

        # state variables for reward
        groundcontact = first_leg_contact or second_leg_contact
        reward = 0

        # let's start with rewards in case of failure
        if not landed_full:
            # penalty on tilt, horizontal offset, throttle and speed (the last two
            # scaled by altitude), plus a time-dependent velocity penalty
            reward = 0 - abs(angle) - abs(x) - abs(throttle) / abs(y) - abs(
                velocity) / abs(y) - (velocity * timestep)
            reward = reward / 100
            print('\ry: {}'.format(y), end='')
            # print('\rflying: {}'.format(reward), end='')

            # if groundcontact:
            #     # case in which the rocket landed (one or both legs) but didn't stabilize (broke).
            #     # -> we set the reward to 0.5 (as ground contact is good) and subtract a value depending
            #     # on angle, horizontal distance, velocity and angular velocity, i.e. the variables we
            #     # want to bring to 0 (the ingredients of a successful landing!). We clip this value to 1,
            #     # so we don't go under -0.5.
            #     reward = 1 - min(1, (abs(x) - abs(angle) - abs(angular_velocity) -
            #                          abs(angle * angular_velocity) - abs(throttle) / abs(y)) / 100)
            #     print('\rlanded improperly: {}'.format(reward), end='')
            # else:
            #     # case in which the rocket is still flying.
            #     # -> we want to encourage the rocket to move towards the center and to stabilize, so we
            #     # start from reward = 0 and subtract a value that we want to be minimized. We clip
            #     # this value to make sure the reward doesn't go under -1.
            #     reward = 0 - (((abs(x) + abs(angle) + abs(angular_velocity) +
            #                     abs(angle * angular_velocity) +
            #                     abs(throttle) / abs(y)) / 100) * np.log(timestep))
            #     print('\rflying: {}'.format(reward), end='')

        # and now the rewards in case of success
        if landed_full:
            reward = 10000
            print('\rlanded properly: {}'.format(reward), end='')
        # if distance > 0:
        #     # case in which the rocket didn't land in the center.
        #     # -> it's a success: we set the reward to 1 and we subtract a value depending on
        #     # the distance from the center of the platform, but not going under 0
        #     reward += 10000  # - abs(x)**2
        #     print('\rlanded uncentered: {}'.format(reward), end='')
        # else:
        #     # fully successful landing, right in the center!
        #     # -> Highest reward, +1
        #     reward += 10000
        #     print('\rlanded perfectly: {}'.format(reward), end='')

        #reward = np.clip(reward, -1, 1) #just in case - normally it should already be clipped above

        display_info(states,
                     additionnal_information,
                     reward,
                     timestep,
                     verbose=False)

        return reward
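Note that the abs(throttle) / abs(y) and abs(velocity) / abs(y) terms above grow without bound as the rocket approaches the ground (y close to 0). A common guard, sketched below as an assumption rather than as part of the original exercise, is to clamp the altitude away from zero before dividing:

EPS = 1e-3  # hypothetical floor keeping the 1/|y| penalties finite near touchdown

def safe_flight_penalty(x, y, angle, throttle, velocity, timestep):
    # Same structure as the failure-case reward above, but numerically safe near y = 0.
    y_safe = max(abs(y), EPS)
    penalty = abs(angle) + abs(x) + abs(throttle) / y_safe + abs(velocity) / y_safe
    return -(penalty + velocity * timestep) / 100.0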
Example #3
def main(args):
    # I/O
    config_file = args.config_file
    config = utils.import_file(config_file, 'config')

    trainset = utils.Dataset(config.train_dataset_path)
    testset = utils.Dataset(config.test_dataset_path)

    network = BaseNetwork()
    network.initialize(config, trainset.num_classes)

    # Initialization for running
    log_dir = utils.create_log_dir(config, config_file)
    summary_writer = tf.summary.FileWriter(log_dir, network.graph)
    if config.restore_model is not None:
        network.restore_model(config.restore_model, config.restore_scopes)

    # Set up LFW test protocol and load images
    print('Loading images...')
    lfwtest = LFWTest(testset.images)
    lfwtest.init_standard_proto(config.lfw_pairs_file)
    lfwtest.images = utils.preprocess(lfwtest.image_paths,
                                      config,
                                      is_training=False)

    trainset.start_batch_queue(config, True)

    #
    # Main Loop
    #
    print('\nStart Training\nname: %s\n# epochs: %d\nepoch_size: %d\nbatch_size: %d\n'\
        % (config.name, config.num_epochs, config.epoch_size, config.batch_size))
    global_step = 0
    start_time = time.time()

    for epoch in range(config.num_epochs):

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(
                global_step, config)
            batch = trainset.pop_batch_queue()

            wl, sm, global_step = network.train(batch['images'],
                                                batch['labels'], learning_rate,
                                                config.keep_prob)

            # Display
            if step % config.summary_interval == 0:
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                summary_writer.add_summary(sm, global_step=global_step)

        # Testing on LFW
        print('Testing on standard LFW protocol...')
        embeddings = network.extract_feature(lfwtest.images, config.batch_size)
        accuracy_embeddings, threshold_embeddings = lfwtest.test_standard_proto(
            embeddings)
        print('Embeddings Accuracy: %2.4f Threshold %2.3f' %
              (accuracy_embeddings, threshold_embeddings))

        with open(os.path.join(log_dir, 'lfw_result.txt'), 'at') as f:
            f.write('%d\t%.5f\n' % (global_step, accuracy_embeddings))
        summary = tf.Summary()
        summary.value.add(tag='lfw/accuracy', simple_value=accuracy_embeddings)
        summary_writer.add_summary(summary, global_step)

        # Save the model
        network.save_model(log_dir, global_step)
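In this loop, utils.display_info acts as the periodic console logger called every summary_interval steps. A rough sketch of such a helper, assuming wl is a dict of named scalar losses (an assumption about the signature, not the actual utils code):

def display_info(epoch, step, duration, watch_list):
    # Print one progress line: epoch/step, wall-clock time for the interval,
    # then each watched scalar (e.g. losses) by name.
    parts = ['[%d][%d] time: %.2f s' % (epoch + 1, step + 1, duration)]
    parts += ['%s: %.4f' % (name, float(value)) for name, value in watch_list.items()]
    print(' '.join(parts))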
    "Data/movie_plots_processed.csv")  # Read preprocessed data-set
grouped_docs = movie_data.groupby('Genre')["Plot"].apply(
    list)  # Group docs by genre (label)

if mode == "train":
    true_train_labels = convert_to_labels(
        grouped_docs,
        end=doc_limit)  # Convert docs in each class with index of its genre
    save_test_dataset(grouped_docs, doc_limit,
                      TEST_DATA)  # Save the rest of data-set for test
    docs, tokenized_docs = prep_docs(
        grouped_docs, doc_limit,
        word_limit)  # Each class is separated for parameter est.
    merged_docs = merge_documents(
        docs)  # Convert docs to a 1-D array to be ready for tf-idf conversion
    display_info(movie_data, merged_docs, word_limit)
    lp.word_to_vec(
        merged_docs,
        is_test=False)  # Create feature vectors & convert to tf-idf vectors
    nb.estimate_parameters(
        docs, tokenized_docs,
        lp.feature_names)  # Est. parameters for naive bayes algorithm
    vectors = np.asarray(lp.tfidf_vectors.todense())
    pred_labels = nb.predict_class(vectors)  # Predict
    display_result(true_train_labels, pred_labels, movie_data)

elif mode == "test":
    acc_history = [0]
    for i in range(n):
        true_test_labels = read_test_labels(grouped_docs)
        print("Testing with {0} documents...".format(doc_limit))
Example #5
def main():
    global INPUT_FILE
    global OUTPUT_FILE

    args = argparse.ArgumentParser(description="Vigenère cipher decryption "
                                   "tool.")

    args.add_argument("-e",
                      "--encrypt",
                      help="Encrypt the input file",
                      action="store_true",
                      dest="mode_encrypt")
    args.add_argument('-k',
                      '--key',
                      help="Add custom key for encryption or "
                      "decryption")
    args.add_argument("-d",
                      "--decrypt",
                      help="Decrypt the input file",
                      action="store_true",
                      dest="mode_decrypt")
    args.add_argument("-i",
                      "--input-file",
                      help="The file from which the text"
                      " will be read to be "
                      "encrypted or decrypted")
    args.add_argument("-o",
                      "--output-file",
                      help="The file where the output "
                      "will be saved")
    args.add_argument("-v",
                      "--verbose",
                      help="Enable verbosity",
                      action="store_true")

    parser = args.parse_args()

    if not parser.mode_encrypt and not parser.mode_decrypt:
        display_error("Mode must be set using --encrypt or --decrypt options.")

    if parser.input_file:
        INPUT_FILE = parser.input_file

        try:
            with open(INPUT_FILE, "r") as _:
                pass
        except FileNotFoundError:
            display_error(f"The file '{INPUT_FILE}' does not exist.")
        except PermissionError:
            display_error(f"You do not have enough permissions to read the "
                          f"file '{INPUT_FILE}'")
    else:
        display_error("No input file provided.")

    if parser.output_file:
        OUTPUT_FILE = parser.output_file
    else:
        display_error("No output file provided")

    if parser.mode_encrypt:
        # Creating a new Encryptor object and uploading the plaintext to be
        # encrypted with a random key
        encryptor = Encryptor(parser.verbose)
        encryptor.read_plaintext_from_file(INPUT_FILE)

        if parser.key:
            encryptor.encrypt(parser.key)
        else:
            encryptor.encrypt()
        encryptor.save_ciphertext_to_file(OUTPUT_FILE)

        display_info("Encryption performed successfully.")
        display_info(f"Saved encrypted text to '{OUTPUT_FILE}'")
    else:
        # Now, prepare the decrypting process by creating a new Decryptor
        # object which will crack the key by itself
        enigma = Decryptor(parser.verbose)
        enigma.read_ciphertext_from_file(INPUT_FILE)

        if parser.key:
            decrypted_key = parser.key
            display_verbose(f"Decrypting using custom key "
                            f"[green]{decrypted_key}[/green] of length "
                            f"[yellow]{len(decrypted_key)}[/yellow]")
        else:
            decrypted_key = enigma.crack_key()

        enigma.decrypt(decrypted_key)
        enigma.save_decrypted_plaintext_to_file(OUTPUT_FILE)

        display_info("Decryption performed successfully.")
        display_info(f"Saved decrypted text to '{OUTPUT_FILE}'")
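Typical invocations of this CLI (the script name vigenere.py is an assumption; it does not appear in the snippet):

    python vigenere.py --encrypt -i plain.txt -o cipher.txt -k LEMON
    python vigenere.py --decrypt -i cipher.txt -o recovered.txt --verbose

When --key is omitted in decrypt mode, the Decryptor cracks the key itself before decrypting.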
Example #6
def main(args):
    # I/O
    config_file = args.config_file
    config = utils.import_file(config_file, 'config')

    trainset = utils.Dataset(config.train_dataset_path)
    testset = utils.Dataset(config.test_dataset_path)

    network = SiblingNetwork()
    network.initialize(config, trainset.num_classes)


    # Initialization for running
    log_dir = utils.create_log_dir(config, config_file)
    summary_writer = tf.summary.FileWriter(log_dir, network.graph)
    if config.restore_model:
        network.restore_model(config.restore_model, config.restore_scopes)

    # Set up test protocol and load images
    print('Loading images...')
    testset.separate_template_and_probes()
    testset.images = utils.preprocess(testset.images, config, is_training=False)


    trainset.start_batch_queue(config, True)


    #
    # Main Loop
    #
    print('\nStart Training\nname: %s\n# epochs: %d\nepoch_size: %d\nbatch_size: %d\n'\
        % (config.name, config.num_epochs, config.epoch_size, config.batch_size))
    global_step = 0
    start_time = time.time()

    for epoch in range(config.num_epochs):

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(global_step, config)
            image_batch, label_batch = trainset.pop_batch_queue()
        
            switch_batch = utils.zero_one_switch(len(image_batch))
            wl, sm, global_step = network.train(image_batch, label_batch, switch_batch, learning_rate, config.keep_prob)

            # Display
            if step % config.summary_interval == 0:
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                summary_writer.add_summary(sm, global_step=global_step)

        # Testing
        print('Testing...')
        switch = utils.zero_one_switch(len(testset.images))
        embeddings = network.extract_feature(testset.images, switch, config.batch_size)
        tars, fars, _ = utils.test_roc(embeddings, FARs=[1e-4, 1e-3, 1e-2])
        with open(os.path.join(log_dir,'result.txt'),'at') as f:
            for i in range(len(tars)):
                print('[%d] TAR: %2.4f FAR %2.3f' % (epoch+1, tars[i], fars[i]))
                f.write('[%d] TAR: %2.4f FAR %2.3f\n' % (epoch+1, tars[i], fars[i]))
                summary = tf.Summary()
                summary.value.add(tag='test/tar_%d'%i, simple_value=tars[i])
                summary_writer.add_summary(summary, global_step)

        # Save the model
        network.save_model(log_dir, global_step)
Example #7
for epoch in range(config.num_epochs):
    # Training
    for step in range(config.epoch_size):
        # Prepare input
        learning_rate = utils.get_updated_learning_rate(global_step, config)
        image_batch, label_batch = trainset.pop_batch_queue()

        wl, sm, global_step = network.train(image_batch, label_batch,
                                            learning_rate, config.keep_prob)

        # Display
        if step % config.summary_interval == 0:
            # visualize.scatter2D(_prelogits[:,:2], _label_batch, _pgrads[0][:,:2])
            duration = time.time() - start_time
            start_time = time.time()
            utils.display_info(epoch, step, duration, wl)
            summary_writer.add_summary(sm, global_step=global_step)

    # Testing
    print('Testing...')
    probe_set.extract_features(network, len(probes))
    gal_set.extract_features(network, len(gal))

    rank1, rank5 = evaluate.identify(log_dir, probe_set, gal_set)
    print('rank-1: %2.3f, rank-5: %2.3f' % (rank1[0], rank5[0]))

    # Output test result
    summary = tf.Summary()
    summary.value.add(tag='identification/rank1', simple_value=rank1[0])
    summary.value.add(tag='identification/rank5', simple_value=rank5[0])
    summary_writer.add_summary(summary, global_step)
Example #8
    def reward_function(self,
                        states_list,
                        timestep=0,
                        print_states=True,
                        print_additionnal_info=True):
        ########## WORK NEEDED #############
        ### You need to shape the reward ###
        ####################################
        """
        Available information:
        x : horizontal position
        y : vertical position
        angle : angle relative to the vertical (negative = right, positive = left)
        first_leg_contact : Left leg touches ground
        second_leg_contact : Right leg touches ground
        throttle : Throttle intensity
        gimbal : Gimbal angle relative to the rocket axis
        velocity_x : horizontal velocity (negative : going Left, positive : going Right)
        velocity_y : vertical velocity (negative : going Down, positive : going Up)
        angular_velocity : angular velocity (negative : turning anti-clockwise, positive : turning clockwise)
        distance : distance from the center of the ship
        velocity : norm of the velocity vector (velocity_x,velocity_y)
        landed : both legs touching the ground
        landed_full : both legs touching the ground for one second (60 frames)
        states : dictionary containing all variables in the state vector. For display purposes
        additionnal_information : dictionary containing additional information. For display purposes

        **Hints**
        Be careful with the sign of the different variables

        Go on and shape the reward !
        """
        # states information extraction
        (
            x,
            y,
            angle,
            first_leg_contact,
            second_leg_contact,
            throttle,
            gimbal,
            velocity_x,
            velocity_y,
            angular_velocity,
            distance,
            velocity,
            landed,
            landed_full,
            states,
            additionnal_information,
        ) = info_extractor(states_list, self.env)

        ######## REWARD SHAPING ###########
        # reward definition (per timestep) : You have to fill it !
        reward = -1

        display_info(states,
                     additionnal_information,
                     reward,
                     timestep,
                     verbose=False)

        return reward
Example #9
    def reward_function(self,
                        states_list,
                        timestep=0,
                        print_states=True,
                        print_additionnal_info=True):
        ########## WORK NEEDED #############
        ### You need to shape the reward ###
        ####################################
        """
        Available information:
        x : horizontal position
        y : vertical position
        angle : angle relative to the vertical (negative = right, positive = left)
        first_leg_contact : Left leg touches ground
        second_leg_contact : Right leg touches ground
        throttle : Throttle intensity
        gimbal : Gimbal angle relative to the rocket axis
        velocity_x : horizontal velocity (negative : going Left, positive : going Right)
        velocity_y : vertical velocity (negative : going Down, positive : going Up)
        angular_velocity : angular velocity (negative : turning anti-clockwise, positive : turning clockwise)
        distance : distance from the center of the ship
        velocity : norm of the velocity vector (velocity_x,velocity_y)
        landed : both legs touching the ground
        landed_full : both legs touching the ground for one second (60 frames)
        states : dictionary containing all variables in the state vector. For display purposes
        additionnal_information : dictionary containing additional information. For display purposes

        **Hints**
        Be careful with the sign of the different variables

        Go on and shape the reward !
        """
        # states information extraction
        (
            x,
            y,
            angle,
            first_leg_contact,
            second_leg_contact,
            throttle,
            gimbal,
            velocity_x,
            velocity_y,
            angular_velocity,
            distance,
            velocity,
            landed,
            landed_full,
            states,
            additionnal_information,
        ) = info_extractor(states_list, self.env)

        #if timestep%10 == 0:
        #    print(f"velocity_y {velocity_y}")
        #    print(f"angle {angle}")
        ######## REWARD SHAPING ###########
        # reward definition (per timestep) : You have to fill it !
        shape = 0
        reward = 0
        # Penalty on ill position
        shape -= (0.1 * abs(distance) +
                  0.5 * abs(velocity) +
                  5 * abs(angle) +
                  0.15 * abs(angular_velocity) +
                  10 * abs(x) +
                  0.5 * max(velocity_y - y, 0))
        # .1 * max((velocity - y), 0)

        # Reward for partial failure scenarios
        shape += 0.1 * (float(first_leg_contact) + float(second_leg_contact))

        if self.prev_shape is not None:
            reward += shape - self.prev_shape
        self.prev_shape = shape

        reward = np.clip(reward, -1, 1)

        if landed_full:
            reward = 100 - 100 * abs(x)

        display_info(states,
                     additionnal_information,
                     reward,
                     timestep,
                     verbose=False)

        return reward
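The reward += shape - self.prev_shape update above is a potential-style shaping: each step is rewarded by how much a state "potential" improved, not by its absolute value, which keeps per-step rewards small and centred around zero. A standalone sketch of the same pattern (class and coefficient names here are illustrative, not from the original code):

class PotentialShaper:
    """Reward the improvement of a state potential between consecutive steps."""

    def __init__(self):
        self.prev_potential = None

    def potential(self, distance, velocity, angle):
        # Higher (less negative) potential = closer to the pad, slower, more upright.
        return -(0.1 * abs(distance) + 0.5 * abs(velocity) + 5.0 * abs(angle))

    def step_reward(self, distance, velocity, angle):
        phi = self.potential(distance, velocity, angle)
        reward = 0.0 if self.prev_potential is None else phi - self.prev_potential
        self.prev_potential = phi
        return reward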
Example #10
def train(config_file, counter):
    # I/O
    config = utils.import_file(config_file, 'config')
    splits_path = config.splits_path + '/split{}'.format(counter)

    trainset = utils.Dataset(splits_path + '/train_' + str(config.fold_number) + '.txt')
    trainset.images = utils.preprocess(trainset.images, config, True)

    network = Network()
    network.initialize(config, trainset.num_classes)

    # Initialization for running
    log_dir = utils.create_log_dir(config, config_file)
    summary_writer = tf.compat.v1.summary.FileWriter(log_dir, network.graph)
    if config.restore_model:
        network.restore_model(config.restore_model, config.restore_scopes)

    # Load gallery and probe file_list
    print('Loading images...')
    probes = []
    gal = []
    with open(splits_path + '/fold_' + str(config.fold_number) + '/probe_1.txt', 'r') as f:
        for line in f:
            probes.append(line.strip())

    probe_set = evaluate.ImageSet(probes, config)
    #probe_set.extract_features(network, len(probes))
    #
    with open(splits_path + '/fold_'+ str(config.fold_number) + '/gal_1.txt', 'r') as f:
        for line in f:
            gal.append(line.strip())
    gal_set = evaluate.ImageSet(gal, config)
    #gal_set.extract_features(network, len(gal))

    trainset.start_batch_queue(config, True)

    #
    # Main Loop
    #
    print('\nStart Training\n# epochs: {}\nepoch_size: {}\nbatch_size: {}\n'.\
        format(config.num_epochs, config.epoch_size, config.batch_size))

    global_step = 0
    start_time = time.time()
    for epoch in range(config.num_epochs):
        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(global_step, config)
            image_batch, label_batch = trainset.pop_batch_queue()

            wl, sm, global_step = network.train(image_batch, label_batch, learning_rate, config.keep_prob)

            # Display
            if step % config.summary_interval == 0:
                # visualize.scatter2D(_prelogits[:,:2], _label_batch, _pgrads[0][:,:2])
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                summary_writer.add_summary(sm, global_step=global_step)

        # Testing
        print('Testing...')
        probe_set.extract_features(network, len(probes))
        gal_set.extract_features(network, len(gal))

        rank1, rank5 = evaluate.identify(log_dir, probe_set, gal_set)
        print('rank-1: {:.3f}, rank-5: {:.3f}'.format(rank1[0], rank5[0]))
        
        # Output test result
        summary = tf.Summary()
        summary.value.add(tag='identification/rank1', simple_value=rank1[0])
        summary.value.add(tag='identification/rank5', simple_value=rank5[0])
        summary_writer.add_summary(summary, global_step)

        # Save the model
        network.save_model(log_dir, global_step)
    results_copy = os.path.join('log/result_{}_{}.txt'.format(config.model_version, counter))
    shutil.copyfile(os.path.join(log_dir,'result.txt'), results_copy)
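A hypothetical driver for this function, assuming split directories split1, split2, ... exist under config.splits_path (the number of splits and the default config path are assumptions):

if __name__ == '__main__':
    import sys
    config_path = sys.argv[1] if len(sys.argv) > 1 else 'config.py'
    for counter in range(1, 6):  # e.g. 5 cross-validation splits
        train(config_path, counter)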