Example #1
def test(label_dict, image_ids, args):
    """
    Test the retrieval system. For each input in image_ids, generate a list of IDs returned for
    that image.

    :param label_dict: Dictionary containing labels (and their confidence) for each image
    :param image_ids: List of image IDs. Each element is assumed to be an entry in the test set,
                      and is located at "pics/<image id>.jpg" in the test path
    :param args: Run-time arguments
    :return: A dictionary with keys equal to the images in the queries list,
             and values a list of image IDs retrieved for that input
    """

    log_header('Starting image retrieval preparations')

    queries = []

    if args.test_image is not None:
        queries.append(args.test_image)
    else:
        logging.info('Generating random test queries')

        # Generate random queries to use in the test procedure
        for _ in range(args.k):
            queries.append(random.choice(image_ids))

    # Calculate score for the retrieved images
    calculate_score(label_dict, queries,
                    retrieve_similar_images(queries, args))
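
The loop above samples with replacement, so the same image can be queried twice. A minimal duplicate-free alternative using random.sample, falling back to replacement when k exceeds the pool size (a sketch, not part of the original code; generate_queries is a hypothetical name):

import random

def generate_queries(image_ids, k):
    """Pick k query IDs, without repeats when the pool is large enough."""
    if k <= len(image_ids):
        return random.sample(image_ids, k)
    return [random.choice(image_ids) for _ in range(k)]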
Example #2
def main(options, logger=None):
    if logger is None:
        logger = Logger("download.log", "download.py", True, True)
    else:
        logger.context = "download.py"
    try:
        query = utils.getQuery()
        if __name__ == "__main__":
            utils.log_header(logger, DESCRIPTION,
                             options.get("verbose", False))

        logger.debug("Options:", options.get("verbose", False))
        for i in options:
            logger.debug(i + ": " + str(options[i]),
                         options.get("verbose", False))
        download_query(query, options, logger)

        # Move all downloaded videos into the videos directory
        repoDir = utils.repoPath()
        for f in os.listdir(repoDir):
            if utils.is_video(f):
                os.rename(os.path.join(repoDir, f),
                          os.path.join(repoDir, "videos", f))
        logger.success("All files downloaded.",
                       not options.get("silent", False))
    except Exception as e:
        logger.handle_exception(e)
        raise  # re-raise with the original traceback
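
A self-contained pathlib sketch of the video-moving step above, using absolute paths throughout (move_videos and its is_video parameter are hypothetical stand-ins for the utils helpers):

from pathlib import Path

def move_videos(repo_dir, is_video):
    """Move every video file in repo_dir into repo_dir/videos."""
    repo = Path(repo_dir)
    target = repo / "videos"
    target.mkdir(exist_ok=True)  # create the target directory if missing
    for f in repo.iterdir():
        if f.is_file() and is_video(f.name):
            f.rename(target / f.name)  # absolute source and destination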
Example #3
def main(options, logger=None):
    # Avoid sharing one Logger instance through a mutable default argument
    if logger is None:
        logger = Logger()
    logger.context = "transfer.py"
    VERBOSE = options.get("verbose", False)

    if __name__ == "__main__":
        utils.log_header(logger, utils.DESCRIPTION, VERBOSE)

    home = str(Path.home())

    for i in utils.getVideos():
        videoPath = utils.videosPath()
        if i not in os.listdir(os.path.join(home, videoPath)):
            logger.plain("copying to (home)/" + videoPath + ": " + str(i),
                         VERBOSE)
            shutil.copyfile(os.path.join("videos", i),
                            os.path.join(home, videoPath, i))

    for i in utils.getMP3s():
        musicPath = utils.musicPath()
        if i not in os.listdir(os.path.join(home, musicPath)):
            logger.plain("copying to (home)/" + musicPath + ": " + str(i),
                         VERBOSE)
            shutil.copyfile(os.path.join("music", "mp3", i),
                            os.path.join(home, musicPath, i))

    logger.success("Copied all pending files.",
                   not options.get("silent", False))
    logger.save()
Example #4
def calculate_score(label_dict, queries, results):
    """
    Calculate score for queried images

    :param label_dict: Dictionary of labels, keys are image IDs
    :param queries: List of image ids to query
    :param results: Retrieved image ids for each image in queries
    :return: Total score
    """

    total_score = 0.0

    log_header('Individual image scores')

    # Calculate score for all images
    for image_id in queries:
        if image_id in results:
            # Run the score function
            image_score = score(label_dict=label_dict,
                                target=image_id,
                                selection=results[image_id])
        else:
            logging.error('No result generated for %s' % image_id)

            image_score = 0.0

        total_score += image_score

        logging.info('%s: %8.6f' % (image_id, image_score))

    log_header('Average score over %d images: %10.8f' %
               (len(queries), total_score / len(queries)))

    return total_score
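
A toy run to illustrate the per-image scoring and averaging, with a stub standing in for the real score function (stub_score and its label-overlap rule are hypothetical):

def stub_score(label_dict, target, selection):
    # Hypothetical rule: fraction of retrieved IDs sharing a label with target
    target_labels = set(label_dict[target])
    hits = sum(1 for s in selection if target_labels & set(label_dict[s]))
    return hits / len(selection) if selection else 0.0

labels = {'a': ['cat'], 'b': ['cat'], 'c': ['dog']}
print(stub_score(labels, 'a', ['b', 'c']))  # 0.5: one of two retrievals match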
Example #5
def train(args):
    """
    Run the training procedure

    :param args: Run-time arguments
    """

    log_header('Training network')

    train_retriever(args)
Example #6
def train_feature_model(label_dict, args):
    """
    Train the feature extractor

    :param label_dict: Dictionary containing labels (and their confidence)
    :param args: Run-time arguments
    """

    log_header('Training feature model')

    run_network(feature_network(args),
                args.feature_model,
                args,
                training_data=(generate_training_batch, {
                    'label_dict': label_dict,
                    'args': args
                }))
Example #7
def main(options, logger=None):
    # Avoid sharing one Logger instance through a mutable default argument
    if logger is None:
        logger = Logger()
    VERBOSE = options.get("verbose", False)
    logger.context = "convert.py"
    if __name__ == "__main__":
        utils.log_header(logger, DESCRIPTION, VERBOSE)
    logger.debug("Bitrate: " + str(options.get("bitrate", "(error)")), VERBOSE)

    utils.convertFiles(options.get("bitrate", 320),
                       options.get("silent", False))
    logger.plain(
        "Converted files. Now extracting thumbnails from videos...", VERBOSE)
    utils.extractThumbnails(options.get("silent", False))
    logger.plain(
        "Extracted thumbnails from videos, now writing them to mp3-files...", VERBOSE)
    utils.writeThumbnails()

    logger.success("All files converted.", not options.get("silent", False))
    logger.save()
Example #8
def generate_features(args):
    """
    Generate and save feature values for all images in training path

    :param args: Run-time arguments
    """

    log_header('Generating features')

    images = []

    # Add all images in training path
    for image_path, _, image_id in get_images_in_path(args.train_path):
        images.append((image_path, image_id))

    # Generate features for all images
    run_network(feature_network(args),
                args.feature_model,
                args,
                train=False,
                generating_data=images,
                save_path=args.features)
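
get_images_in_path is not shown on this page; a plausible sketch of such a helper, assuming images are stored as <image id>.jpg files under the given path (the walk-based layout is an assumption):

import os

def get_images_in_path(path):
    """Yield (directory, filename, image_id) for every .jpg under path."""
    for root, _, files in os.walk(path):
        for name in files:
            if name.lower().endswith('.jpg'):
                yield root, name, os.path.splitext(name)[0]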
Example #9
def run_network(network,
                model_name,
                args,
                train=True,
                training_data=None,
                value=None,
                generating_data=None,
                save_path=None):
    """
    Run a neural network. Can either train weights, evaluate input values, or generate new features
    for images.

    When generating features, the features are saved in batches to pickle files

    :param network: Network to run
    :param model_name: Name of the checkpoint file. Either for saving or loading weights
    :param args: Run-time arguments
    :param train: Whether to train or evaluate
    :param training_data: Data used for training
    :param value: Value used for evaluating
    :param generating_data: Data used for generating new features
    :param save_path: Path to save new features
    :return: Evaluated value
    """

    # Set up session
    with tf.Session() as sess:
        # Extract variables from network
        x, y, output_layer, cost_function, optimizer = network

        # Initialize variables and set up saver
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()

        # Enable training if wanted
        if train:
            logging.info('Training model')

            try:
                # Iterate training epochs
                for epoch in range(args.training_epochs):
                    log_header('Epoch: %d' % epoch)

                    # Iterate batches in epoch
                    for batch in range(0, args.number_of_batches):
                        logging.info('Epoch: %d. Batch: %d' % (epoch, batch))

                        # Generate training batch
                        x_, y_ = training_data[0](**training_data[1])

                        # Run the optimizer
                        sess.run([optimizer, cost_function],
                                 feed_dict={
                                     x: x_,
                                     y: y_
                                 })

                logging.info('Training complete')
            except KeyboardInterrupt:
                logging.error('Training aborted')
            finally:
                logging.info('Saving model')

                # Save trained weights
                saver.save(sess, model_name)

                logging.debug('Model saved to %s' % model_name)
        else:
            # Import and restore trained weights
            saver = tf.train.import_meta_graph('%s.meta' % model_name)
            saver.restore(sess, model_name)

            # Evaluate value if supplied
            if value is not None:
                res = []

                # Evaluate in batches in order not to deplete memory
                for i in range(0, len(value), args.batch_size):
                    output = np.squeeze(
                        sess.run([output_layer],
                                 feed_dict={x: value[i:i + args.batch_size]}))

                    # np.squeeze drops the batch dimension when the slice
                    # contains a single input
                    if len(value[i:i + args.batch_size]) == 1:
                        output = [output]

                    res.extend(output)

                return res

            # Set up batches for saving features
            save_batch_number_of_batches = 50
            save_batch_size = args.batch_size * save_batch_number_of_batches
            total_testing_data = len(generating_data)
            total_save_batches = math.ceil(total_testing_data /
                                           save_batch_size)

            # Iterate every feature saving batch
            for save_batch_number in range(total_save_batches):
                logging.info('Batch %d of %d' %
                             (save_batch_number + 1, total_save_batches))

                # Set up batch variables
                save_batch_offset = save_batch_number * save_batch_size
                save_batch_name = '%s.%d' % (args.features, save_batch_number)
                save_batch_features = {}

                # Iterate smaller batches
                for batch_number in range(save_batch_number_of_batches):
                    # Step by the evaluation batch size, not by the number
                    # of batches per save file
                    batch_offset = save_batch_offset + batch_number * args.batch_size

                    if batch_offset >= total_testing_data:
                        break

                    # Extract a generation batch
                    batch = generating_data[batch_offset:batch_offset +
                                            args.batch_size]
                    inputs = []

                    # Pre-process images in batch
                    for image_path, image_id in batch:
                        inputs.append(
                            preprocess_image(
                                '%s/%s.jpg' % (image_path, image_id), args))

                    # Generate features
                    features = np.squeeze(
                        sess.run([output_layer], feed_dict={x: inputs}))

                    # np.squeeze drops the batch dimension for a single image
                    if len(batch) == 1:
                        features = [features]

                    # Add features to the save batch
                    for i in range(len(batch)):
                        _, image_id = batch[i]

                        save_batch_features[image_id] = features[i]

                # Save batch to file
                with open(save_batch_name, 'wb') as f:
                    pickle.dump(save_batch_features, f)
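
run_network receives its training batches as a (function, kwargs) pair and calls training_data[0](**training_data[1]) once per batch, as seen in the epoch loop above. A self-contained sketch of that convention with a hypothetical batch generator:

import random

def random_batch(label_dict, batch_size):
    # Hypothetical generator: returns parallel lists of IDs and labels
    ids = random.sample(list(label_dict), batch_size)
    return ids, [label_dict[i] for i in ids]

labels = {'img1': 0, 'img2': 1, 'img3': 0}
training_data = (random_batch, {'label_dict': labels, 'batch_size': 2})

# This mirrors how run_network consumes the pair inside its epoch loop
x_, y_ = training_data[0](**training_data[1])
print(x_, y_)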
Example #10
#
# Start THREAD
#
poll_thread = THREAD(joy_range, logfilename, joy_input)
poll_thread.daemon = True
poll_thread.start()

##############################
# Start DISPLAY
mode = DISPLAY('USB connection', poll_thread, task_info, FPS, fullscreen_on,
               debug_mode)

mode.task_info['subject_id'] = subject_id

#
# write header
#
log_header(poll_thread.logfile, poll_thread.joy_status, mode.task_info,
           poll_thread.joy_range, min_median_ref)

#
# Start Labjack
#
if param['start_emg'] or param['run_stimulation'] or param['monitor_emg']:
    import labjack_interface as lji
    lji.reset()
    mode.task_info['description'] = 'start labjack'
    mode.task_info['time'] = datetime.now().strftime("%H:%M:%S.%f")
    log(poll_thread.logfile, poll_thread.joy_status, mode.task_info)

target_on = False

#
# CREATION TARGETS:
Example #11
#
poll_thread = THREAD(joy_range, logfilename, joy_input)
poll_thread.daemon = True
poll_thread.start()

##############################
# Start DISPLAY
mode = DISPLAY('USB connection', poll_thread, task_info, FPS, fullscreen_on,
               debug_mode)

mode.task_info['subject_id'] = subject_id

#
# write header
#
log_header(poll_thread.logfile, poll_thread.joy_status, mode.task_info,
           poll_thread.joy_range, min_median_ref)

#
# Start Labjack
#
if param['start_emg'] or param['run_stimulation'] or param['monitor_emg']:
    import labjack_interface as lji
    lji.reset()
    mode.task_info['description'] = 'start labjack'
    mode.task_info['time'] = datetime.now().strftime("%H:%M:%S.%f")
    log(poll_thread.logfile, poll_thread.joy_status, mode.task_info)

target_on = False

#
# CREATION TARGETS:
Example #12
#
# Start THREAD
#
poll_thread = THREAD(joy_range, logfilename, joy_input)
poll_thread.daemon = True
poll_thread.start()

#
# Start DISPLAY
#
mode = DISPLAY('USB connection', poll_thread, task_info, FPS, fullscreen_on)

#
# write header
#
log_header(poll_thread.logfile, poll_thread.joy_status, mode.task_info,
           joy_range)

#
# Start Labjack
#
if param['start_emg'] or param['run_stimulation'] or param['monitor_emg']:
    import labjack_interface as lji
    lji.reset()
    mode.task_info['description'] = 'start labjack'
    mode.task_info['time'] = datetime.now().strftime("%H:%M:%S.%f")
    log(poll_thread.logfile, poll_thread.joy_status, mode.task_info)

target_on = False

#
# 6 : PSEUDO RANDOM
Example #13
#
# start THREAD
#
poll_thread = THREAD(joy_range, logfilename, joy_input)
poll_thread.daemon = True
poll_thread.start()

#
# start DISPLAY
#
mode = DISPLAY('USB connection', poll_thread, task_info, FPS, fullscreen_on)

#
# write header
#
log_header(poll_thread.logfile, poll_thread.joy_status, mode.task_info)


#
# display calibration
#
mode.display.fill(colors.BLACK)
mode.display_text('CALIBRATION', colors.RED, 100, datetime.now(), 5)
mode.display_text(calibration_string[0], colors.RED, 50, datetime.now(), 2)

running_calibration = True
next_order = True

calibration_string_order = 0

while running_calibration:
Example #14
    action="store_true",
    help=
    "when wished format is not availabe, it will be automatically converted by default. This keeps the old format, too."
)
parser.add_argument("--filename",
                    "--fname",
                    nargs="?",
                    type=str,
                    help="filename to save the file under")

arguments = parser.parse_args()
args = vars(arguments)
VERBOSE = args.get("verbose", False)

log = Logger("workflow.log", "workflow.py", True, True)
utils.log_header(log, DESCRIPTION, VERBOSE)

try:
    log.debug("Debug mode activated.", VERBOSE)
    log.debug("Args: ", VERBOSE)
    for i in args:
        log.debug(i + ": " + str(args[i]), VERBOSE)
    if not args.get("test", False):
        download.main(args, logger=log)
    if args.get("facerec", None) != None and not args.get("test", False):
        if "all" in args.get("facerec", []):
            # pass all videos from utils.getVideos() to facerec
            facerec.main({"files": utils.getVideos()})
        else:
            # pass all videos from args to facerec
            facerec.main({"files": args.get("facerec", [])})