Code example #1
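# Assumed context (not shown in this excerpt): `read` is a project-local
# data-loading module; conv_embedding(), create_train_instances(),
# samples_per_class, classes_per_set and feature_length are defined
# elsewhere in the same script.
import numpy as np
from keras.layers import Input, Lambda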
feature_data = read.read()

test_ids = list(feature_data.keys())
all_labels = list(feature_data[test_ids[0]].keys())

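# Hold out each subject (test_id) in turn; within each split, one activity
# label is removed from training and treated as the novel class.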
for test_id in test_ids:
    for a_label in all_labels:
        train_labels = [a for a in all_labels if a != a_label]
        _train_data, _test_data = read.split(feature_data, test_id)
        _train_data = read.remove_class(_train_data, [a_label])
        train_data = create_train_instances(_train_data)

        _support_data, _test_data = read.support_set_split(
            _test_data, samples_per_class)
        _support_data, _support_labels = read.flatten(_support_data)
        _support_data = np.array(_support_data)
        _support_data = np.expand_dims(_support_data, -1)  # append channel axis

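        # Matching-network style input: numsupportset support samples plus
        # one query are stacked along axis 1 of a single tensor; a shared
        # conv embedding is applied to each slice via Lambda layers.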
        numsupportset = samples_per_class * classes_per_set
        input1 = Input((numsupportset + 1, feature_length, 1))
        modelinputs = []
        base_network = conv_embedding()
        for lidx in range(numsupportset):
            # i=lidx binds the loop index at definition time, so each Lambda
            # slices its own support-set position (robust if the model is
            # ever re-traced or reloaded).
            modelinputs.append(
                base_network(Lambda(lambda x, i=lidx: x[:, i, :, :])(input1)))
        targetembedding = base_network(
            Lambda(lambda x: x[:, -1, :, :])(input1))
        modelinputs.append(targetembedding)
        supportlabels = Input((numsupportset, classes_per_set))
        modelinputs.append(supportlabels)
Code example #2
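# Assumed context (not shown in this excerpt): `read` is a project-local
# data-loading module; create_train_instances(), samples_per_class,
# classes_per_set and feature_length are defined elsewhere.
import numpy as np
from keras.layers import (Input, Dense, Flatten, Lambda, Conv1D,
                          MaxPooling1D, BatchNormalization)
from keras.models import Model


# The excerpt opens inside the conv_embedding() builder; the lines below
# are reconstructed from the fuller copy in example #5, and the Input
# shape is an assumption.
def conv_embedding():
    _input = Input(shape=(feature_length, 1))
    x = Conv1D(12, kernel_size=5, activation='relu')(_input)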
    x = MaxPooling1D(pool_size=2)(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(1200, activation='relu')(x)
    x = BatchNormalization()(x)
    return Model(inputs=_input, outputs=x, name='embedding')


feature_data = read.read()

test_ids = list(feature_data.keys())
for test_id in test_ids:
    _train_data, _test_data = read.split(feature_data, test_id)
    train_data = create_train_instances(_train_data)

    _train_data, _train_labels = read.flatten(_train_data)
    _test_data, _test_labels = read.flatten(_test_data)
    print(len(_train_data))
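    # The triple-quoted block below comments out an alternative setup; its
    # closing quotes fall beyond this excerpt.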
    '''_train_data = np.array(_train_data)
    _test_data = np.array(_test_data)
    _train_data = np.expand_dims(_train_data, 3)
    _test_data = np.expand_dims(_test_data, 3)

    numsupportset = samples_per_class * classes_per_set
    input1 = Input((numsupportset + 1, feature_length, 1))

    modelinputs = []
    base_network = conv_embedding()
    for lidx in range(numsupportset):
        modelinputs.append(base_network(Lambda(lambda x: x[:, lidx, :, :])(input1)))
    targetembedding = base_network(Lambda(lambda x: x[:, -1, :, :])(input1))
Code example #3
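# Assumed context (not shown in this excerpt): `read` is a project-local
# data-loading module; build_conv_model(), triplet_generator_minibatch()
# and the hyperparameters candidates, mini_batch_size, steps_per_epoch,
# epochs and feature_length are defined elsewhere.
import numpy as np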

feature_data = read.read()

test_ids = list(feature_data.keys())
all_labels = list(feature_data[test_ids[0]].keys())

for test_id in test_ids:
    for a_label in all_labels:
        train_labels = [a for a in all_labels if a != a_label]
        _train_data, _test_data = read.split(feature_data, test_id)
        _train_data = read.remove_class(_train_data, [a_label])

        _support_data, _test_data = read.support_set_split(_test_data, candidates)

        _train_data, _train_labels = read.flatten(_train_data)
        _support_data, _support_labels = read.flatten(_support_data)

        _train_data = np.array(_train_data)
        _train_data = np.expand_dims(_train_data, -1)  # append channel axis

        _support_data = np.array(_support_data)
        _support_data = np.expand_dims(_support_data, -1)

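        # Train the triplet network on the seen classes, then embed the
        # held-out class's support set with the shared embedding model.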
        _embedding_model, _triplet_model = build_conv_model((feature_length, 1))

        _triplet_model.fit_generator(
            triplet_generator_minibatch(_train_data, _train_labels,
                                        mini_batch_size, train_labels),
            steps_per_epoch=steps_per_epoch, epochs=epochs, verbose=1)

        _support_preds = _embedding_model.predict(_support_data)
Code example #4
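# Assumed context (not shown in this excerpt): `read` is a project-local
# data-loading module.
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model


# The excerpt opens inside a dense autoencoder builder (180-d input, 96-d
# sigmoid bottleneck, linear 180-d reconstruction); the def line is elided
# in the source, so the name below is a hypothetical stand-in.
def build_autoencoder():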
    _input = Input(shape=(180,))
    encoded = Dense(96, activation='sigmoid')(_input)
    decoded = Dense(180)(encoded)
    return Model(inputs=_input, outputs=decoded)


feature_data = read.read()
test_ids = list(feature_data.keys())

hc_results = []
ca_results = []
ha_results = []

for test_id in test_ids:
    _train_data, _test_data = read.split(feature_data, [test_id])
    h_train_data, c_train_data, a_train_data, _train_labels = read.flatten(
        _train_data)
    h_test_data, c_test_data, a_test_data, _test_labels = read.flatten(
        _test_data)

    h_train_data = np.array(h_train_data)
    c_train_data = np.array(c_train_data)
    a_train_data = np.array(a_train_data)
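    # Build the three pairwise combinations of the h/c/a feature streams,
    # matching the hc/ca/ha result lists above.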
    hc_train_data = np.concatenate([h_train_data, c_train_data], axis=1)
    print(hc_train_data.shape)
    ca_train_data = np.concatenate([c_train_data, a_train_data], axis=1)
    print(ca_train_data.shape)
    ha_train_data = np.concatenate([h_train_data, a_train_data], axis=1)
    print(ha_train_data.shape)

    h_test_data = np.array(h_test_data)
    c_test_data = np.array(c_test_data)
Code example #5
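# Assumed context (not shown in this excerpt): `read` is a project-local
# data-loading module and feature_length is defined elsewhere.
import numpy as np
from keras.layers import (Input, Dense, Flatten, Conv1D, MaxPooling1D,
                          BatchNormalization)
from keras.models import Model
from keras.utils import np_utils


# The excerpt opens inside the convolutional embedding builder; the def
# line and Input below are reconstructions, with the shape assumed from
# how the features are fed to the model.
def conv_embedding():
    _input = Input(shape=(feature_length, 1))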
    x = Conv1D(12, kernel_size=5, activation='relu')(_input)
    x = MaxPooling1D(pool_size=2)(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(1200, activation='relu')(x)
    x = BatchNormalization()(x)
    return Model(inputs=_input, outputs=x, name='embedding')


all_features = read.read()
test_ids = list(all_features.keys())

for test_id in test_ids:
    _train_features, _test_features = read.split(all_features, test_id)

    _train_features, _train_labels = read.flatten(_train_features)
    _test_features, _test_labels = read.flatten(_test_features)

    _train_labels_ = np_utils.to_categorical(_train_labels,
                                             len(read.activity_list))
    _test_labels_ = np_utils.to_categorical(_test_labels,
                                            len(read.activity_list))

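    # Flatten each 2-D feature window into one long vector, then append a
    # channel axis for the conv input.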
    _train_features = np.array(_train_features)
    _train_features = np.reshape(
        _train_features, (_train_features.shape[0],
                          _train_features.shape[1] * _train_features.shape[2]))
    _train_features = np.expand_dims(_train_features, -1)  # axis 3 is out of range for a 2-D array
    print(_train_features.shape)

    _test_features = np.array(_test_features)
Code example #6
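# Assumed context (not shown in this excerpt): `read` is a project-local
# data-loading module and k_shot (support samples per class) is defined
# elsewhere.
import numpy as np
from keras.utils import np_utils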

all_features = read.read()
test_ids = list(all_features.keys())
all_labels = list(all_features[test_ids[0]].keys())

for test_id in test_ids:
    for a_label in all_labels:
        train_labels = [a for a in all_labels if a != a_label]
        _train_features, _test_features = read.split(all_features, test_id)
        _train_features = read.remove_class(_train_features, [a_label])

        _support_features, _test_features = read.support_set_split(
            _test_features, k_shot)

        _train_features, _train_labels = read.flatten(_train_features)
        _support_features, _support_labels = read.flatten(_support_features)

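        # Remap the remaining (seen) class labels to contiguous integer ids
        # so they can be one-hot encoded below.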
        activity_id_dict = {label: i for i, label in enumerate(train_labels)}
        _train_labels_ = [activity_id_dict.get(item) for item in _train_labels]

        _train_labels_ = np_utils.to_categorical(_train_labels_,
                                                 len(train_labels))

        _train_features = np.array(_train_features)
        # Flatten each 2-D window into one vector (same reshape as example #5).
        _train_features = np.reshape(
            _train_features,
            (_train_features.shape[0],
             _train_features.shape[1] * _train_features.shape[2]))