# Example #1 (scraped snippet marker; original page vote count: 0)
                    ])
                    # NOTE(review): `stat` appears to hold per-sample hit/miss flags
                    # built just above (construction cut off in this view); averaging
                    # them yields the running training accuracy — confirm upstream.
                    stat = np.average(stat)
                    print('Training accuracy %f' % stat)

            # Decrease the learning by 5 every 10 epoch after 20 epochs at the first learning rate
            # (`lr_strat` lists the epoch indices at which lr is divided by `lr_factor`).
            if epoch in lr_strat:
                lr /= lr_factor

        # Shut down the TF1 input-pipeline queue runners started for this session.
        coord.request_stop()
        coord.join(threads)

        # copy weights to store network
        # Snapshot every variable of the training graph so the "store" (previous-step)
        # network can be rebuilt from these values in the next incremental iteration.
        save_weights = sess.run(
            [variables_graph[i] for i in range(len(variables_graph))])
        utils_resnet.save_model(save_path + 'model-iteration' + str(nb_cl) +
                                '-%i.pickle' % itera,
                                scope='ResNet18',
                                sess=sess)

    # Reset the graph
    tf.reset_default_graph()

    ## Exemplars management part  ##
    # NOTE(review): looks like a fixed total exemplar budget (nb_proto * nb_groups)
    # split evenly over the itera+1 class batches seen so far — confirm against the
    # full script.
    nb_protos_cl = int(np.ceil(
        nb_proto * nb_groups * 1. /
        (itera + 1)))  # Reducing number of exemplars for the previous classes
    files_from_cl = files_train[itera]
    # Rebuild the network / input pipeline to score the current batch's training
    # files and expose the feature-map op used by the herding selection below.
    inits, scores, label_batch, loss_class, file_string_batch, op_feature_map = utils_icarl.reading_data_and_preparing_network(
        files_from_cl, gpu, itera, batch_size, train_path, labels_dic, mixing,
        nb_groups, nb_cl, save_path)

    with tf.Session(config=config) as sess:
# Example #2 (scraped snippet marker; original page vote count: 0)
            # Every 80 steps, report running top-1 training accuracy for this window.
            if (i+1)%80 == 0:
                stat = []
                # For each sample, check whether its true label equals the arg-max
                # class: np.argsort(sc, axis=1)[:, -1:] keeps only the single
                # highest-scoring column, so `ll in best` is a top-1 hit test.
                stat += ([ll in best for ll, best in zip(lab, np.argsort(sc, axis=1)[:, -1:])])
                stat =np.average(stat)
                print('Training accuracy %f' %stat)
        
        # Decrease the learning by 5 every 10 epoch after 20 epochs at the first learning rate
        # (`lr_strat` lists the epoch indices at which lr is divided by `lr_factor`).
        if epoch in lr_strat:
            lr /= lr_factor

    # Shut down the TF1 input-pipeline queue runners started for this session.
    coord.request_stop()
    coord.join(threads)

    # copy weights to store network
    # Snapshot every variable of the training graph so the "store" (previous-step)
    # network can be rebuilt from these values in the next incremental iteration.
    save_weights = sess.run([variables_graph[i] for i in range(len(variables_graph))])
    utils_resnet.save_model(save_path+'model-iteration'+str(nb_cl)+'-%i.pickle' % itera, scope='ResNet18', sess=sess)
  
  # Reset the graph 
  tf.reset_default_graph()
  
  ## Exemplars management part  ##
  # NOTE(review): looks like a fixed total exemplar budget (nb_proto * nb_groups)
  # split evenly over the itera+1 class batches seen so far — confirm upstream.
  nb_protos_cl  = int(np.ceil(nb_proto*nb_groups*1./(itera+1))) # Reducing number of exemplars for the previous classes
  files_from_cl = files_train[itera]
  # Rebuild the network / input pipeline to score the current batch's training
  # files and expose the feature-map op used by the herding selection below.
  inits,scores,label_batch,loss_class,file_string_batch,op_feature_map = utils_icarl.reading_data_and_preparing_network(files_from_cl, gpu, itera, batch_size, train_path, labels_dic, mixing, nb_groups, nb_cl, save_path)
  
  with tf.Session(config=config) as sess:
    coord   = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # Run the initialization ops returned by the pipeline builder (result unused).
    void3   = sess.run(inits)
    
    # Load the training samples of the current batch of classes in the feature space to apply the herding algorithm