    # Tail of the MLP embedding builder: wrap the layer stack into a Model and return it.
    embedding_model = Model(base_input, x, name='embedding')
    return embedding_model


feature_data = read.read()
test_ids = list(feature_data.keys())
all_labels = list(feature_data[test_ids[0]].keys())

# Hold out each test_id in turn, and hold out one activity class at a time.
for test_id in test_ids:
    for a_label in all_labels:
        train_labels = [a for a in all_labels if a != a_label]

        _train_data, _test_data = read.split(feature_data, test_id)
        _train_data = read.remove_class(_train_data, [a_label])
        _support_data, _test_data = read.support_set_split(_test_data, k_shot)

        _train_data, _train_labels = read.flatten(_train_data)
        _support_data, _support_labels = read.flatten(_support_data)

        _train_data = np.array(_train_data)
        _support_data = np.array(_support_data)
        _train_labels = np.array(_train_labels)
        _support_labels = np.array(_support_labels)

        # Shared embedding network applied to both branches of the pair.
        base_network = build_mlp_model(feature_length)
        input_a = Input(shape=(feature_length,))
        input_b = Input(shape=(feature_length,))
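        # Hedged sketch, not part of the original listing: one plausible way the
        # siamese pair above could be completed, assuming an absolute-difference
        # merge and a sigmoid similarity score; "K" stands for keras.backend and
        # the loss/optimizer choices are placeholders, not the authors' settings.
        processed_a = base_network(input_a)
        processed_b = base_network(input_b)
        distance = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))([processed_a, processed_b])
        similarity = Dense(1, activation='sigmoid')(distance)
        siamese_model = Model(inputs=[input_a, input_b], outputs=similarity)
        siamese_model.compile(loss='binary_crossentropy', optimizer='adam')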
    x = Dense(1200, activation='relu')(x)
    x = BatchNormalization()(x)
    # Tail of the embedding builder: wrap the layer stack into a Model and return it.
    return Model(inputs=_input, outputs=x, name='embedding')


all_features = read.read()
test_ids = list(all_features.keys())
all_labels = list(all_features[test_ids[0]].keys())

for test_id in test_ids:
    for a_label in all_labels:
        train_labels = [a for a in all_labels if a != a_label]

        _train_features, _test_features = split(all_features, test_id)
        _train_features = read.remove_class(_train_features, [a_label])
        _support_features, _test_features = read.support_set_split(
            _test_features, k_shot)

        _train_features, _train_labels = flatten(_train_features)
        _support_features, _support_labels = flatten(_support_features)

        # Map each retained activity label to an integer id, then one-hot encode.
        id_list = range(len(train_labels))
        activity_id_dict = dict(zip(train_labels, id_list))
        _train_labels_ = []
        for item in _train_labels:
            _train_labels_.append(activity_id_dict.get(item))
        _train_labels_ = np_utils.to_categorical(_train_labels_, len(train_labels))

        _train_features = np.array(_train_features)
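        # Hedged sketch, not part of the original listing: a plausible continuation
        # that attaches a softmax head to the embedding and fits it on the one-hot
        # labels built above. "embedding_model" is a hypothetical handle to the
        # Model returned by the builder whose tail opens this listing, and the
        # epoch/batch-size values are placeholders.
        features_in = Input(shape=(feature_length,))
        class_scores = Dense(len(train_labels), activation='softmax')(embedding_model(features_in))
        classifier = Model(inputs=features_in, outputs=class_scores)
        classifier.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        classifier.fit(_train_features, _train_labels_, epochs=10, batch_size=64, verbose=0)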
    x = Dense(1200, activation='relu')(x)
    # Tail of the embedding-model builder: wrap the layers into a Model and return it.
    return Model(inputs=_input, outputs=x, name='embedding')


feature_data = read.read()
test_ids = list(feature_data.keys())
all_labels = list(feature_data[test_ids[0]].keys())

for test_id in test_ids:
    for a_label in all_labels:
        train_labels = [a for a in all_labels if a != a_label]

        _train_data, _test_data = read.split(feature_data, test_id)
        _train_data = read.remove_class(_train_data, [a_label])
        _support_data, _test_data = read.support_set_split(
            _test_data, candidates)

        _train_data, _train_labels = read.flatten(_train_data)
        _support_data, _support_labels = read.flatten(_support_data)

        # Add a channel axis so the convolutional embedding can consume the features.
        _train_data = np.array(_train_data)
        _train_data = np.expand_dims(_train_data, 3)
        _support_data = np.array(_support_data)
        _support_data = np.expand_dims(_support_data, 3)
        _train_labels = np.array(_train_labels)
        _support_labels = np.array(_support_labels)

        base_network = build_conv_model()
        input_a = Input(shape=(feature_length, 1))
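        # Hedged sketch, not part of the original listing: one plausible way a test
        # sample could be scored against the k-shot support set once a two-input
        # similarity model has been trained. "pair_model" is a hypothetical handle
        # to that trained model, and "_test_labels" is assumed to come from
        # read.flatten alongside _test_data.
        _test_data, _test_labels = read.flatten(_test_data)
        _test_data = np.expand_dims(np.array(_test_data), 3)
        for sample, true_label in zip(_test_data, _test_labels):
            lefts = np.array([sample] * len(_support_data))
            scores = pair_model.predict([lefts, _support_data]).ravel()
            predicted_label = _support_labels[int(np.argmax(scores))]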
test_ids = list(feature_data.keys())
all_labels = list(feature_data[test_ids[0]].keys())

for test_id in test_ids:
    # for a_label in all_labels:
    for _int in range(5):
        # Randomly hold out num_test_classes activities as the unseen test classes.
        test_labels_indices = np.random.choice(len(all_labels), num_test_classes, False)
        test_labels = [a for ii, a in enumerate(all_labels) if ii in test_labels_indices]
        print(test_labels)
        train_labels = [a for ii, a in enumerate(all_labels) if ii not in test_labels_indices]
        print(train_labels)

        _train_data, _test_data = read.split(feature_data, test_id)
        _train_data = read.remove_class(_train_data, test_labels)
        _train_data = create_train_instances(_train_data)
        _support_data, _test_data = read.support_set_split(_test_data, samples_per_class)
        _support_data, _support_labels = read.flatten(_support_data)
        _support_data = np.array(_support_data)

        # Matching-network graph: embed each support sample and the target with a
        # shared MLP, then score the target against the support set by cosine similarity.
        numsupportset = samples_per_class * classes_per_set
        input1 = Input((numsupportset + 1, feature_length))
        modelinputs = []
        base_network = mlp_embedding()
        for lidx in range(numsupportset):
            modelinputs.append(base_network(Lambda(lambda x: x[:, lidx, :])(input1)))
        targetembedding = base_network(Lambda(lambda x: x[:, -1, :])(input1))
        modelinputs.append(targetembedding)
        supportlabels = Input((numsupportset, classes_per_set))
        modelinputs.append(supportlabels)
        knnsimilarity = MatchCosine(nway=classes_per_set, n_samp=samples_per_class)(modelinputs)
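        # Hedged sketch, not part of the original listing: assuming the matching
        # network is closed over the cosine-similarity output and trained with
        # categorical cross-entropy, as in common Keras matching-network examples;
        # the optimizer choice is a placeholder.
        model = Model(inputs=[input1, supportlabels], outputs=knnsimilarity)
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])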