Code example #1
    elif datasource in ('miniImageNet_embedding', 'tieredImageNet_embedding'):
        from FC640 import FC640

        net = FC640(dim_output=num_classes_per_task,
                    num_hidden_units=(256, 64),
                    device=device)
    else:
        sys.exit('Unknown dataset')
# endregion

w_shape = net.get_weight_shape()
# print(w_shape)

num_weights = get_num_weights(net)
print('Number of parameters of base model = {0:d}'.format(num_weights))

num_val_tasks = args.num_val_tasks

p_dropout_base = args.p_dropout_base

dst_folder_root = '.'
dst_folder = '{0:s}/BMAML_few_shot/BMAML_{1:s}_{2:d}way_{3:d}shot'.format(
    dst_folder_root, datasource, num_classes_per_task,
    num_training_samples_per_class)
if not os.path.exists(dst_folder):
    os.makedirs(dst_folder)
    print('No folder for storage found')
    print('Make folder to store meta-parameters at')
else:
    # Assumed continuation: the listing cuts the snippet off here, so this
    # branch simply reports that the storage folder already exists.
    print('Found existing folder to store meta-parameters at')
print(dst_folder)
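
The snippet above calls get_num_weights(net) and net.get_weight_shape(), but neither helper is shown in this listing. The sketch below is one plausible implementation, assuming get_weight_shape() returns a dict mapping each parameter name to its shape; it is an illustration, not the repository's actual code.

import numpy as np

def get_num_weights(my_net):
    # Illustrative sketch: count parameters from the per-parameter shapes
    # that my_net.get_weight_shape() is assumed to return as a dict.
    w_shape = my_net.get_weight_shape()
    return int(sum(np.prod(shape) for shape in w_shape.values()))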
Code example #2
    def __init__(self, sess, data, args):
        self.sess = sess
        self.data = data
        self.args = args
        self.data_mb_list = U.list_of_minibatches(data, args.bsize)
        self.num_train_mbs = len(self.data_mb_list['X_train'])
        self.num_valid_mbs = len(self.data_mb_list['X_valid'])

        # Obviously assumes we're using MNIST.
        self.x_dim = 28 * 28
        self.num_classes = 10
        self.num_train = self.args.data_stats['num_train']
        self.num_valid = self.args.data_stats['num_valid']
        self.num_test = self.args.data_stats['num_test']

        # Placeholders for input data and (known) targets.
        self.x_BO = tf.placeholder(shape=[None, self.x_dim], dtype=tf.float32)
        self.y_targ_B = tf.placeholder(shape=[None], dtype=tf.int32)

        # Build network for predictions.
        with tf.variable_scope('Classifier'):
            self.y_Bh1 = tf.nn.sigmoid(tf.layers.dense(self.x_BO, 100))
            self.y_pred_BC = tf.layers.dense(self.y_Bh1, self.num_classes)

        self.y_pred_B = tf.cast(tf.argmax(self.y_pred_BC, 1), tf.int32)
        self.correct_preds = tf.equal(self.y_targ_B, self.y_pred_B)
        self.accuracy = U.mean(tf.cast(self.correct_preds, tf.float32))
        self.y_targ_BC = tf.one_hot(indices=self.y_targ_B,
                                    depth=self.num_classes)

        # Handle the weights plus an assignment operator (useful for leapfrogs).
        self.weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        self.num_weights = U.get_num_weights(self.weights)
        self.weights_v = U.vars_to_vector(self.weights)
        self.new_weights_v = tf.placeholder(tf.float32,
                                            shape=[self.num_weights])
        self.update_wts_op = U.set_weights_from_vector(self.weights,
                                                       self.new_weights_v)

        # Construct objective (using our one-hot encoding) and updaters.
        self.loss = U.mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=self.y_targ_BC,
                                                    logits=self.y_pred_BC))

        # Somewhat different, to make it easier to use HMC later.
        # For HMC, we need hyperparameters and their updaters.
        if args.algo == 'hmc':
            self.hmc_updater = HMCUpdater(self.sess, self.args, self.x_BO,
                                          self.y_targ_B, self.y_pred_BC,
                                          self.weights, self.new_weights_v,
                                          self.update_wts_op, self.loss,
                                          self.num_train, self.data_mb_list)
        else:
            self.grads = []
            self.updaters = []
            for w in self.weights:
                # tf.gradients returns a list; extract its only item.
                grad = tf.gradients(self.loss, w)[0]
                self.grads.append(grad)
                self.updaters.append(SGDUpdater(w, grad, args))
            # Group the per-variable SGD updates into a single training op,
            # built once after the loop rather than on every iteration.
            self.train_op = tf.group(*[up.update() for up in self.updaters])

        # View a summary and initialize.
        self._print_summary()
        self.sess.run(tf.global_variables_initializer())
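
The classifier above leans on a utilities module U whose weight-vector helpers, vars_to_vector and set_weights_from_vector, do the flattening and un-flattening needed for the HMC leapfrog updates. That module is not part of this listing; the following is a hedged sketch of how those two helpers could look in TensorFlow 1.x, not the repository's actual implementation.

import numpy as np
import tensorflow as tf

def vars_to_vector(var_list):
    # Flatten every variable and concatenate them into one 1-D tensor.
    return tf.concat([tf.reshape(v, [-1]) for v in var_list], axis=0)

def set_weights_from_vector(var_list, new_weights_v):
    # Split the flat vector into per-variable chunks and assign them back.
    assign_ops = []
    start = 0
    for v in var_list:
        shape = v.get_shape().as_list()
        size = int(np.prod(shape))
        chunk = tf.reshape(new_weights_v[start:start + size], shape)
        assign_ops.append(tf.assign(v, chunk))
        start += size
    return tf.group(*assign_ops)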
Code example #3
        dim_output=num_classes_per_task,
        num_hidden_units=(128,),
        device=device
    )
elif datasource == 'miniImageNet':
    from ConvNet import ConvNet

    net = ConvNet(
        dim_input=(3, 84, 84),
        dim_output=num_classes_per_task,
        num_filters=[32]*4
    )

w_target_shape = net.get_weight_shape()
# print(w_target_shape)
num_weights = get_num_weights(my_net=net)
print('Number of weights of base model = \t {0:d}'.format(num_weights))

dst_folder_root = '.'

dst_folder = '{0:s}/Amortised_ML_few_shot_meta/Amortised_ML_{1:s}_{2:d}way_{3:d}shot'.format(
    dst_folder_root,
    datasource,
    num_classes_per_task,
    num_training_samples_per_class
)
if not os.path.exists(dst_folder):
    os.makedirs(dst_folder)
    print('Create folder to store weights')
    print(dst_folder)
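
Examples #1 and #3 use the same check-then-create pattern for the destination folder. When the existing-folder branch only prints a message, the pattern can be collapsed into a single call on Python 3.2+; this is a minor stylistic alternative, not what the original code does.

import os

os.makedirs(dst_folder, exist_ok=True)  # no-op if the folder already exists
print('Weights will be stored in {0:s}'.format(dst_folder))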
Code example #4
    s = time.time()
    test(network, device, test_loader)
    e = time.time()
    print("Inference Time:  " + str(e - s))

if args.prune == 1:
    print("=" * 60)
    print("PRUNING")
    print("=" * 60)
    print("")

    name = args.data + '_' + args.load[:-4]
    set_sparsity(network, args.sensitivity, name)
    rule = get_rules("rules/" + name + ".rule")
    fname = args.load[:-4] + '_pruned'
    original_param, o_total = get_num_weights(network, verbose=False)

    pruner = Pruner(rule=rule)
    pruner.prune(model=network, stage=0, update_masks=True, verbose=False)

    if args.init_param == 1:
        network.apply(weights_init_uniform_rule)
        print("\nRe-initialised weights...")

    # prune
    for i in range(args.prune_iter):
        print("")
        print("-" * 60)
        print("PRUNE ITERATION", i)
        print("-" * 60)
        print("")