Example #1
for itera in range(nb_groups):
    print("Processing network after {} increments\t".format(itera))
    # Evaluation on cumul of classes or original classes
    if is_cumul == 'cumul':
        eval_groups = np.array(range(itera + 1))
    else:
        eval_groups = [0]

    print("Evaluation on batches {} \t".format(eval_groups))
    # Load the evaluation files
    files_from_cl = []
    for i in eval_groups:
        files_from_cl.extend(files_valid[i])

    inits, scores, label_batch, loss_class, file_string_batch, op_feature_map = utils_icarl.reading_data_and_preparing_network(
        files_from_cl, gpu, itera, batch_size, train_path, labels_dic, mixing,
        nb_groups, nb_cl, save_path)

    with tf.Session(config=config) as sess:
        # Launch the prefetch system
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        sess.run(inits)

        # Evaluation routine
        stat_hb1 = []
        stat_icarl = []
        stat_ncm = []

        for i in range(int(np.ceil(len(files_from_cl) / batch_size))):
            sc, l, loss, files_tmp, feat_map_tmp = sess.run([
                scores, label_batch, loss_class, file_string_batch,
                op_feature_map])
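Example #1 is truncated here. As a minimal sketch, and assuming each stat list collects one boolean top-1 hit per evaluated image (as in Example #3 below), the loop typically ends by reducing the three lists to accuracies; the helper name is illustrative:

import numpy as np

def report_accuracies(itera, stat_hb1, stat_icarl, stat_ncm):
    # Each list holds one boolean top-1 hit per evaluated image.
    print('Increment: %i' % itera)
    print('Hybrid 1 accuracy: %f' % np.average(stat_hb1))
    print('iCaRL accuracy: %f' % np.average(stat_icarl))
    print('NCM accuracy: %f' % np.average(stat_ncm))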
Example #2
    # (Tail of a call cut off by the snippet; given the scope/sess
    # arguments and the graph reset just below, it most plausibly saves
    # the trained ResNet18 parameters for this increment.)
                                scope='ResNet18',
                                sess=sess)

    # Reset the graph
    tf.reset_default_graph()

    ## Exemplars management part ##
    # Compute the required number of exemplars per class: here nb_proto
    # acts as a fixed total budget split evenly over the nb_cl * (itera + 1)
    # classes seen so far, so the per-class quota shrinks at each increment.
    # nb_protos_cl = int(np.ceil(nb_proto * nb_groups * 1. / (itera + 1)))  # Reducing number of exemplars for the previous classes
    nb_protos_cl = int(math.ceil(nb_proto / (nb_cl * (itera + 1))))
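    # For example, with a hypothetical budget nb_proto = 2000, nb_cl = 10
    # classes per group and itera = 4 (five groups seen so far):
    # ceil(2000 / (10 * 5)) = 40 exemplars kept per class.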

    # files_from_cl contains NEW data only (the current increment's classes)
    files_from_cl = files_train[itera]
    inits, scores, label_batch, loss_class, file_string_batch, op_feature_map = utils_icarl.reading_data_and_preparing_network(
        images_mean, files_from_cl, device, itera, batch_size, nb_groups,
        nb_cl, save_path)

    with tf.Session(config=config) as sess:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        sess.run(inits)

        # Load the training samples of the current batch of classes in the feature space to apply the herding algorithm
        Dtot, processed_files, label_dico = utils_icarl.load_class_in_feature_space(
            files_from_cl, batch_size, scores, label_batch, loss_class,
            file_string_batch, op_feature_map, sess)
        processed_files = np.array([x.decode() for x in processed_files])

        # Herding procedure: ranking of the potential exemplars
        print('Exemplars selection starting ...')
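The snippet stops before the selection itself. Below is a minimal sketch of the greedy herding ranking it announces, assuming D holds the feature columns of a single class (a slice of Dtot) and quota equals nb_protos_cl; the function name and the iteration cap are illustrative:

import numpy as np

def herding_order(D, quota, max_iter=1000):
    # D: features x samples matrix for one class.
    mu = np.mean(D, axis=1)
    w_t = mu.copy()
    order = []
    it = 0
    while len(order) < quota and it < max_iter:
        # Pick the sample whose feature best aligns with the residual,
        # then update the residual so the mean of the selected samples
        # keeps tracking the true class mean mu.
        ind = int(np.argmax(np.dot(w_t, D)))
        if ind not in order:
            order.append(ind)
        w_t = w_t + mu - D[:, ind]
        it += 1
    return order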
Example #3
import numpy as np
import tensorflow as tf

import utils_data
import utils_icarl

# Loading the labels
labels_dic, label_names, validation_ground_truth = utils_data.parse_devkit_meta(devkit_path)

# Initialization
acc_list = np.zeros((nb_groups, 3))  # three accuracy statistics (hybrid 1, iCaRL, NCM) per increment

# Load the evaluation files
print("Processing network after {} increments\t".format(itera))
print("Evaluation on batches {} \t".format(eval_groups))
files_from_cl = []
for i in eval_groups:
    files_from_cl.extend(files_valid[i])

inits, scores, label_batch, loss_class, file_string_batch, op_feature_map = utils_icarl.reading_data_and_preparing_network(
    files_from_cl, gpu, itera, batch_size, train_path, labels_dic, mixing,
    nb_groups, nb_cl, save_path)

with tf.Session(config=config) as sess:
    # Launch the prefetch system
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    sess.run(inits)
    
    # Evaluation routine
    stat_hb1 = []
    stat_icarl = []
    stat_ncm = []
    
    for i in range(int(np.ceil(len(files_from_cl) / batch_size))):
        sc, l, loss, files_tmp, feat_map_tmp = sess.run([
            scores, label_batch, loss_class, file_string_batch,
            op_feature_map])
        # The feature map is N x 1 x 1 x C after global pooling; dropping
        # the unit spatial dimensions leaves one feature vector per image.
        mapped_prototypes = feat_map_tmp[:, 0, 0, :]
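Example #3 is also cut off here. A hedged sketch of how the evaluation statistics are then typically accumulated for this batch, assuming a class_means array (not shown in the snippet) whose column 0 holds the per-class means of the exemplar features and column 1 the means over all training data; that array and its layout are assumptions:

        from scipy.spatial.distance import cdist  # normally imported at the top

        # L2-normalize the features (one column per image after the transpose).
        pred_inter = mapped_prototypes.T / np.linalg.norm(mapped_prototypes.T, axis=0)

        # Negative squared distances to the class means serve as scores.
        sqd_icarl = -cdist(class_means[:, :, 0].T, pred_inter.T, 'sqeuclidean').T
        sqd_ncm = -cdist(class_means[:, :, 1].T, pred_inter.T, 'sqeuclidean').T

        # Record top-1 hits for the network output (hybrid 1), iCaRL and NCM.
        stat_hb1 += [ll in best for ll, best in zip(l, np.argsort(sc, axis=1)[:, -1:])]
        stat_icarl += [ll in best for ll, best in zip(l, np.argsort(sqd_icarl, axis=1)[:, -1:])]
        stat_ncm += [ll in best for ll, best in zip(l, np.argsort(sqd_ncm, axis=1)[:, -1:])]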
Example #4
    # Get the evaluation data: cumulative classes, or the original classes only
    if is_cumul == 'cumul':
        eval_groups = np.array(range(itera + 1))
    else:
        eval_groups = [0]

    # Load the evaluation files
    extract_files = []
    for i in eval_groups:
        extract_files.extend(files_valid[i])

    destination_path = os.path.join(destination_dir,
                                    'b' + str(itera + 1) + '_weight_bias.tf')
    print('Saving stats in: ' + destination_path)

    inits, scores, label_batch, loss_class, file_string_batch, op_feature_map = utils_icarl.reading_data_and_preparing_network(
        images_mean, extract_files, device, itera, batch_size, S, P, save_path)

    with tf.Session(config=config) as sess:
        # Launch the prefetch system
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        sess.run(inits)

        weights = [
            v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            if v.name.endswith('ResNet18/fc/W:0')
        ]
        bias = [
            v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            if v.name.endswith('ResNet18/fc/b:0')
        ]
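The listing ends before the extracted variables are written out. A plausible continuation, sketched with standard TF1 APIs; saving via tf.train.Saver (rather than some other serialization) is an assumption:

        # Save only the final fully connected layer's parameters.
        saver = tf.train.Saver(var_list=weights + bias)
        saver.save(sess, destination_path)

        # Shut down the queue-runner threads before leaving the session.
        coord.request_stop()
        coord.join(threads)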