import heapq
import sys

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
# `read` below is the project's own data/IO helper module (not shown here)


def cos_knn(k, test_data, test_labels, stored_data, stored_target):
    # pairwise cosine similarity between every test row and every stored row
    cosim = cosine_similarity(test_data, stored_data)

    # indices of the k most similar stored rows for each test row
    top = [heapq.nlargest(k, range(len(i)), i.take) for i in cosim]
    # map neighbour indices to their stored labels
    top = [[stored_target[j] for j in i[:k]] for i in top]

    # majority vote over the k neighbour labels
    pred = [max(set(i), key=i.count) for i in top]
    pred = np.array(pred)

    # accuracy of the majority-vote predictions
    correct = 0
    for j in range(len(test_labels)):
        if test_labels[j] == pred[j]:
            correct += 1
    acc = correct / float(len(test_labels))
    # NOTE: relies on module-level test_ids and sys.argv in the original script
    read.write_data('tn_conv.csv',
                    'tn_conv,3nn,' + str(test_ids[int(sys.argv[1])]) + ',' +
                    str(acc))
    return acc
        model = Model(inputs=[input_a, input_b], outputs=distance)

        model.compile(loss=contrastive_loss, optimizer='adam')

        for x in range(epochs):
            digit_indices = [
                np.where(_train_labels == i)[0] for i in train_labels
            ]
            x_pairs, y_pairs = create_pairs(_train_data, digit_indices,
                                            len(train_labels))
            model.fit([x_pairs[:, 0], x_pairs[:, 1]],
                      y_pairs,
                      verbose=1,
                      batch_size=batch_size,
                      epochs=1)

        _support_preds = base_network.predict(_support_data)

        for _l in list(_test_data[test_id].keys()):
            _test_label_data = _test_data[test_id][_l]
            _test_labels = [_l for i in range(len(_test_label_data))]
            _test_label_data = np.array(_test_label_data)
            _test_labels = np.array(_test_labels)
            _test_preds = base_network.predict(_test_label_data)

            acc = cos_knn(k, _test_preds, _test_labels, _support_preds,
                          _support_labels)
            result = 'sn_mlp,3nn,' + str(test_id) + ',' + str(
                a_label) + ',' + str(_l) + ',' + str(acc)
            read.write_data('sn_mlp_oe.csv', result)
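# The fragment above relies on contrastive_loss, create_pairs, and a
# `distance` output that are defined in parts of the script not shown here.
# A minimal sketch along the lines of the classic Keras mnist_siamese
# example (the margin value and pairing scheme are assumptions):
import random

import numpy as np
from keras import backend as K

def euclidean_distance(vects):
    # L2 distance between the two embeddings of a pair; typically wired up
    # as: distance = Lambda(euclidean_distance)([emb_a, emb_b])
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True),
                            K.epsilon()))

def contrastive_loss(y_true, y_pred, margin=1.0):
    # Hadsell et al. (2006): pull genuine pairs together, push impostor
    # pairs at least `margin` apart
    return K.mean(y_true * K.square(y_pred) +
                  (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))

def create_pairs(x, digit_indices, num_classes):
    # alternate one positive and one negative pair per step
    pairs, labels = [], []
    n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
    for d in range(num_classes):
        for i in range(n):
            z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
            pairs += [[x[z1], x[z2]]]
            inc = random.randrange(1, num_classes)
            dn = (d + inc) % num_classes
            z1, z2 = digit_indices[d][i], digit_indices[dn][i]
            pairs += [[x[z1], x[z2]]]
            labels += [1, 0]
    return np.array(pairs), np.array(labels)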

feature_data = read.read()

test_ids = list(feature_data.keys())
for test_id in test_ids:

    _train_data, _test_data = read.split(feature_data, test_id)
    _train_data, _train_labels = read.flatten(_train_data)
    _test_data, _test_labels = read.flatten(_test_data)

    _train_data = np.array(_train_data)
    _test_data = np.array(_test_data)

    _embedding_model, _triplet_model = build_mlp_model((feature_length, ))

    _triplet_model.fit_generator(triplet_generator_minibatch(
        _train_data, _train_labels, mini_batch_size),
                                 steps_per_epoch=steps_per_epoch,
                                 epochs=epochs,
                                 verbose=1)

    _train_preds = _embedding_model.predict(_train_data)
    _test_preds = _embedding_model.predict(_test_data)

    acc = read.cos_knn(k, _test_preds, _test_labels, _train_preds,
                       _train_labels)
    result = 'prototype_tn_mlp, 3nn,' + str(test_id) + ',' + str(acc)
    print(result)
    read.write_data('tn_mlp.csv', result)
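# build_mlp_model and triplet_generator_minibatch are project helpers that
# are not shown above. A plausible sketch of the generator, assuming the
# triplet model consumes [anchor, positive, negative] batches and ignores
# the dummy target:
import numpy as np

def triplet_generator_minibatch(data, labels, batch_size):
    labels = np.asarray(labels)
    classes = np.unique(labels)
    while True:
        anchors, positives, negatives = [], [], []
        for _ in range(batch_size):
            c = np.random.choice(classes)            # pick an anchor class
            pos_idx = np.where(labels == c)[0]
            neg_idx = np.where(labels != c)[0]
            a, p = np.random.choice(pos_idx, 2)      # anchor and positive
            n = np.random.choice(neg_idx)            # negative from another class
            anchors.append(data[a])
            positives.append(data[p])
            negatives.append(data[n])
        # dummy y: a triplet loss only looks at the three embeddings
        yield ([np.array(anchors), np.array(positives), np.array(negatives)],
               np.zeros(batch_size))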
        model.compile(loss=contrastive_loss, optimizer='adam')

        for x in range(epochs):
            digit_indices = [
                np.where(_train_labels == i)[0] for i in train_labels
            ]
            x_pairs, y_pairs = create_pairs(_train_data, digit_indices,
                                            len(train_labels))
            model.fit([x_pairs[:, 0], x_pairs[:, 1]],
                      y_pairs,
                      verbose=1,
                      batch_size=batch_size,
                      epochs=1)

        _support_preds = base_network.predict(_support_data)

        for _l in list(_test_data[test_id].keys()):
            _test_label_data = _test_data[test_id][_l]
            _test_labels = [_l for i in range(len(_test_label_data))]
            _test_label_data = np.array(_test_label_data)
            _test_label_data = np.expand_dims(_test_label_data, 3)
            _test_labels = np.array(_test_labels)
            _test_preds = base_network.predict(_test_label_data)

            acc = cos_knn(k, _test_preds, _test_labels, _support_preds,
                          _support_labels)
            result = 'sn_conv,3nn,' + str(test_id) + ',' + str(
                a_label) + ',' + str(_l) + ',' + str(acc)
            read.write_data('psn_conv_oe.csv', result)
# Reconstructed head, inferred from the call site further down; assumes
# sklearn's euclidean_distances and heapq are imported.
def ed_knn(test_data, stored_data, thigh_train_data):
    dist = euclidean_distances(test_data, stored_data)
    top = [heapq.nsmallest(kk, range(len(i)), i.take) for i in dist]
    pred = [[thigh_train_data[j] for j in i[:kk]] for i in top]
    pred = np.array(pred)
    pred = np.average(pred, axis=1)
    return pred


feature_data = read.read()
test_ids = list(feature_data.keys())

for test_id in test_ids:
    _train_data, _test_data = read.split(feature_data, [test_id])
    w_train_data, t_train_data, _train_labels = read.flatten(_train_data)
    w_test_data, t_test_data, _test_labels = read.flatten(_test_data)

    w_train_data = np.array(w_train_data)
    t_train_data = np.array(t_train_data)
    train_data = np.concatenate([w_train_data, t_train_data], axis=1)
    print(train_data.shape)
    w_test_data = np.array(w_test_data)

    t_test_data = ed_knn(w_test_data, w_train_data, t_train_data)
    test_data = np.concatenate([w_test_data, t_test_data], axis=1)
    print(test_data.shape)

    cos_acc = read.cos_knn(k, test_data, _test_labels, train_data,
                           _train_labels)
    results = 'euclid_t_translator,' + str(k) + ',' + str(
        kk) + ',cos_acc,' + str(cos_acc)
    print(results)
    read.write_data(results_file, results)
        _support_features, _test_features = read.support_set_split(
            _test_features, k_shot)

        _train_features, _train_labels = flatten(_train_features)
        _support_features, _support_labels = flatten(_support_features)

        id_list = range(len(train_labels))
        activity_id_dict = dict(zip(train_labels, id_list))

        _train_labels_ = []
        for item in _train_labels:
            _train_labels_.append(activity_id_dict.get(item))

        _train_features = np.array(_train_features)
        print(_train_features.shape)

        _support_features = np.array(_support_features)
        print(_support_features.shape)

        # knn evaluation
        for _l in list(_test_features[test_id].keys()):
            _test_label_data = _test_features[test_id][_l]
            _test_labels = [_l for i in range(len(_test_label_data))]
            _test_label_data = np.array(_test_label_data)

            acc = cos_knn(k, _test_label_data, _test_labels, _support_features,
                          _support_labels)
            result = 'knn,' + str(test_id) + ',' + str(a_label) + ',' + str(
                _l) + ',' + str(acc)
            read.write_data('knn_oe.csv', result)
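# support_set_split is project code; a plausible sketch, assuming it holds
# out the first k_shot samples per class of each held-out user as the
# support set and keeps the remainder for evaluation:
def support_set_split(test_features, k_shot):
    support, remainder = {}, {}
    for user, class_dict in test_features.items():
        support[user], remainder[user] = {}, {}
        for label, samples in class_dict.items():
            support[user][label] = samples[:k_shot]
            remainder[user][label] = samples[k_shot:]
    return support, remainder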
# Example #7
_test_labels = np_utils.to_categorical(_test_labels, len(read.activity_list))

_train_features = np.array(_train_features)
print(_train_features.shape)

_test_features = np.array(_test_features)
print(_test_features.shape)

_model = mlp()
_model.compile(optimizer='adam',
               loss='categorical_crossentropy',
               metrics=['accuracy'])
_model.fit(_train_features,
           _train_labels,
           verbose=1,
           batch_size=batch_size,
           epochs=epochs,
           shuffle=True)
results = _model.evaluate(_test_features,
                          _test_labels,
                          batch_size=batch_size,
                          verbose=0)
print(results)
read.write_data('conv architecture' + ',' + 'window_length:' +
                str(read.window_length) + ',' + 'dct_length:' +
                str(read.dct_length) + ',' + 'increment_ratio:' +
                str(read.increment_ratio) + ',' + 'batch_size:' +
                str(batch_size) + ',' + 'epochs:' + str(epochs) + ',' +
                'test_id:' + str(test_id[0]) + ',' + 'score:' +
                ','.join([str(f) for f in results]))
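# mlp() is project code that is not shown; a minimal sketch consistent with
# how it is used above (a dense softmax classifier over the flattened
# feature vectors; the hidden-layer sizes are assumptions):
from keras.layers import Dense, Input
from keras.models import Model

def mlp():
    x_in = Input((_train_features.shape[1], ))
    h = Dense(128, activation='relu')(x_in)
    h = Dense(64, activation='relu')(h)
    y = Dense(len(read.activity_list), activation='softmax')(h)
    return Model(inputs=x_in, outputs=y)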
# Example #8
    classifier = Dense(len(PAMAP.activity_list), activation='softmax')(base)
    _model = Model(inputs=_input_, outputs=classifier, name='classifier')

    _model.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
    _model.fit(_train_features,
               _train_labels_,
               verbose=1,
               batch_size=batch_size,
               epochs=epochs,
               shuffle=True)

    _train_preds = base_network.predict(_train_features)
    _test_preds = base_network.predict(_test_features)

    # classifier evaluation
    results = _model.evaluate(_test_features,
                              _test_labels_,
                              batch_size=batch_size,
                              verbose=0)
    print(results)

    # knn evaluation
    k = 3
    acc = cos_knn(k, _test_preds, _test_labels, _train_preds, _train_labels)

    PAMAP.write_data(
        'conv.csv',
        'score,' + ','.join([str(f) for f in results]) + ',knn_acc,' + str(acc))
        model.compile(loss=contrastive_loss, optimizer='adam')

        for x in range(epochs):
            digit_indices = [
                np.where(_train_labels == i)[0] for i in train_labels
            ]
            x_pairs, y_pairs = create_pairs(_train_data, digit_indices,
                                            len(train_labels))
            model.fit([x_pairs[:, 0], x_pairs[:, 1]],
                      y_pairs,
                      verbose=1,
                      batch_size=batch_size,
                      epochs=1)

        _support_preds = base_network.predict(_support_data)

        for _l in list(_test_data[test_id].keys()):
            _test_label_data = _test_data[test_id][_l]
            _test_labels = [_l for i in range(len(_test_label_data))]
            _test_label_data = np.array(_test_label_data)
            _test_label_data = np.expand_dims(_test_label_data, 3)
            _test_labels = np.array(_test_labels)
            _test_preds = base_network.predict(_test_label_data)

            acc = read.cos_knn(k, _test_preds, _test_labels, _support_preds,
                               _support_labels)
            result = 'sn_conv, 3nn,' + str(num_test_classes) + ',' + str(
                test_id) + ',' + ','.join([str(t) for t in test_labels
                                           ]) + ',' + str(_l) + ',' + str(acc)
            read.write_data('sn_conv_oe_n.csv', result)
# Example #10
modelinputs.append(targetembedding)
supportlabels = Input((numsupportset, classes_per_set))
modelinputs.append(supportlabels)
knnsimilarity = MatchCosine(nway=classes_per_set,
                            n_samp=samples_per_class)(modelinputs)

model = Model(inputs=[input1, supportlabels], outputs=knnsimilarity)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit([train_data[0], train_data[1]],
          train_data[2],
          epochs=epochs,
          batch_size=batch_size,
          verbose=1)
score = model.evaluate([test_data[0], test_data[1]],
                       test_data[2],
                       batch_size=batch_size,
                       verbose=1)

print(score)
read.write_data('window_length:' + str(read.window_length) + ',' +
                'dct_length:' + str(read.dct_length) + ',' +
                'increment_ratio:' + str(read.increment_ratio) + ',' +
                'classes_per_set:' + str(classes_per_set) + ',' +
                'samples_per_class:' + str(samples_per_class) + ',' +
                'train_size:' + str(train_size) + ',' + 'batch_size:' +
                str(batch_size) + ',' + 'epochs:' + str(epochs) + ',' +
                'test_id:' + str(test_id[0]) + ',' + 'score:' +
                ','.join([str(f) for f in score]))
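# MatchCosine is a custom layer that never appears in these fragments. A
# minimal sketch of the usual matching-networks read-out (cosine attention
# over the support set; the normalisation details are assumptions):
from keras import backend as K
from keras.layers import Layer

class MatchCosine(Layer):
    def __init__(self, nway=5, n_samp=1, **kwargs):
        super(MatchCosine, self).__init__(**kwargs)
        self.nway = nway
        self.n_samp = n_samp

    def call(self, inputs):
        # inputs: [support_emb_1, ..., support_emb_n, target_emb, support_labels]
        target = K.l2_normalize(inputs[-2], axis=-1)
        support_labels = inputs[-1]                # (batch, n_support, nway)
        sims = [K.sum(K.l2_normalize(s, axis=-1) * target, axis=-1)
                for s in inputs[:-2]]              # cosine sim per support item
        attention = K.softmax(K.stack(sims, axis=1))
        # attention-weighted sum of one-hot support labels -> class scores
        return K.sum(K.expand_dims(attention, -1) * support_labels, axis=1)

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], self.nway)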
        numsupportset = samples_per_class * classes_per_set
        input1 = Input((numsupportset + 1, feature_length))
        modelinputs = []
        base_network = mlp_embedding()
        for lidx in range(numsupportset):
            modelinputs.append(
                base_network(Lambda(lambda x: x[:, lidx, :])(input1)))
        targetembedding = base_network(Lambda(lambda x: x[:, -1, :])(input1))
        modelinputs.append(targetembedding)
        supportlabels = Input((numsupportset, classes_per_set))
        modelinputs.append(supportlabels)
        knnsimilarity = MatchCosine(nway=classes_per_set,
                                    n_samp=samples_per_class)(modelinputs)

        model = Model(inputs=[input1, supportlabels], outputs=knnsimilarity)
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        model.fit([_train_data[0], _train_data[1]],
                  _train_data[2],
                  epochs=epochs,
                  batch_size=batch_size,
                  verbose=1)

        _support_preds = base_network.predict(_support_data)

        for _l in list(_test_data[test_id].keys()):
            _test_label_data = _test_data[test_id][_l]
            _test_labels = [_l for i in range(len(_test_label_data))]
            _test_label_data = np.array(_test_label_data)
            _test_labels = np.array(_test_labels)
            _test_preds = base_network.predict(_test_label_data)

            acc = read.cos_knn(k, _test_preds, _test_labels, _support_preds,
                               _support_labels)
            # result = 'mn_mlp, 3nn,' + str(test_id) + ',' + str(a_label) + ',' + str(_l) + ',' + str(acc)
            result = 'mn_mlp, 3nn,' + str(num_test_classes) + ',' + str(
                test_id) + ',' + ','.join([str(t) for t in test_labels
                                           ]) + ',' + str(_l) + ',' + str(acc)
            read.write_data('mn_mlp_oe_n.csv', result)
    embedding, _model = mlp()

    _model.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
    _model.fit(_train_data,
               _train_labels_,
               verbose=1,
               batch_size=batch_size,
               epochs=epochs,
               shuffle=True)

    _train_preds = embedding.predict(_train_data)
    _test_preds = embedding.predict(_test_data)

    # classifier evaluation
    results = _model.evaluate(_test_data,
                              _test_labels_,
                              batch_size=batch_size,
                              verbose=0)
    print(results)

    # knn evaluation
    k = 3
    acc = cos_knn(k, _test_preds, _test_labels, _train_preds, _train_labels)
    print(acc)

    read.write_data(
        'mlp.csv', 'score' + ',' + ','.join([str(f) for f in results]) + ',' +
        'knn_acc' + ',' + str(acc))
    modelinputs = []
    base_network = conv_embedding()
    for lidx in range(numsupportset):
        modelinputs.append(
            base_network(Lambda(lambda x: x[:, lidx, :, :])(input1)))
    targetembedding = base_network(Lambda(lambda x: x[:, -1, :, :])(input1))
    modelinputs.append(targetembedding)
    supportlabels = Input((numsupportset, classes_per_set))
    modelinputs.append(supportlabels)
    knnsimilarity = MatchCosine(nway=classes_per_set,
                                n_samp=samples_per_class)(modelinputs)

    model = Model(inputs=[input1, supportlabels], outputs=knnsimilarity)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit([train_data[0], train_data[1]],
              train_data[2],
              epochs=epochs,
              batch_size=batch_size,
              verbose=1)

    _train_preds = base_network.predict(_train_data)
    _test_preds = base_network.predict(_test_data)

    acc = read.cos_knn(k, _test_preds, _test_labels, _train_preds,
                       _train_labels)
    result = 'prototype_mn_conv, 3nn,' + str(test_id) + ',' + str(acc)
    print(result)
    read.write_data('mn_conv.csv', result)
                 c_train_data,
                 verbose=0,
                 epochs=100,
                 shuffle=True)

    c_test_data = ae_model.predict(h_test_data)
    hc_test_data = np.concatenate([h_test_data, c_test_data], axis=1)
    print(hc_test_data.shape)

    cos_acc = read.cos_knn(k, ha_test_data, _test_labels, ha_train_data,
                           _train_labels)
    ha_results.append('ha,' + 'a_translator,' + str(k) + ',cos_acc,' +
                      str(cos_acc))

    cos_acc = read.cos_knn(k, ca_test_data, _test_labels, ca_train_data,
                           _train_labels)
    ca_results.append('ca,' + 'a_translator,' + str(k) + ',cos_acc,' +
                      str(cos_acc))

    cos_acc = read.cos_knn(k, hc_test_data, _test_labels, hc_train_data,
                           _train_labels)
    hc_results.append('hc,' + 'c_translator,' + str(k) + ',cos_acc,' +
                      str(cos_acc))

for item in hc_results:
    read.write_data(results_file, item)
for item in ca_results:
    read.write_data(results_file, item)
for item in ha_results:
    read.write_data(results_file, item)
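# ae_model and the h*/c*/a* feature sets come from the truncated head of
# this snippet. A minimal sketch of a dense feature translator in the same
# spirit (the hidden size and the use of MSE are assumptions):
from keras.layers import Dense, Input
from keras.models import Model

def build_translator(input_dim, output_dim, hidden=64):
    x_in = Input((input_dim, ))
    h = Dense(hidden, activation='relu')(x_in)
    x_out = Dense(output_dim, activation='linear')(h)
    model = Model(inputs=x_in, outputs=x_out)
    model.compile(optimizer='adam', loss='mse')    # regress onto target features
    return model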
    _model = Model(inputs=_input_, outputs=classifier, name='classifier')

    _model.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
    _model.fit(_train_features,
               _train_labels_,
               verbose=1,
               batch_size=batch_size,
               epochs=epochs,
               shuffle=True)

    _train_preds = base_network.predict(_train_features)
    _test_preds = base_network.predict(_test_features)

    # classifier evaluation
    results = _model.evaluate(_test_features,
                              _test_labels_,
                              batch_size=batch_size,
                              verbose=0)
    print(results)

    # knn evaluation
    acc = read.cos_knn(k, _test_preds, _test_labels, _train_preds,
                       _train_labels)

    read.write_data(
        'conv.csv',
        'score:' + ','.join([str(f)
                             for f in results]) + ',knn_acc,' + str(acc))