Example #1
    metadata = {}       # Output data structure
    GroupNumber = {}    # Per-particle-type group membership, filled below

    with h5.File(files[2], 'r') as h5file:

        # Create a HYDRO/DMO switch
        if "/PartType0" in h5file:
            part_types = [0, 1, 4]
        else:
            part_types = [1]

        # Loop over particle types (hydro/dmo sensitive)
        for part_type in part_types:

            # Read in GroupNumber info
            N_particles = header.data.subfind_particles.NumPart_ThisFile[part_type]
            start, end = split(N_particles)
            GroupNumber[f'PartType{part_type}'] = np.empty(0, dtype=np.int64)
            GroupNumber[f'PartType{part_type}'] = np.append(
                GroupNumber[f'PartType{part_type}'],
                np.abs(h5file[f'PartType{part_type}/GroupNumber'][start:end])
            )

            # Generate the metadata in parallel through MPI
            unique, unique_indices, unique_counts = np.unique(
                GroupNumber[f'PartType{part_type}'],
                return_index=True,
                return_counts=True
            )

            # Initialise and allocate metadata entries in each rank
            metadata[f'PartType{part_type}'] = {}
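The snippet above relies on a split(N_particles) helper that is not shown. A minimal sketch, assuming an mpi4py communicator and an even partition of the particle range across ranks (both assumptions, not the original implementation):

# Assumed sketch of the split() helper used above: it partitions the
# [0, n_particles) index range across MPI ranks so that each rank reads
# only its own slice of the HDF5 dataset.
from mpi4py import MPI

comm = MPI.COMM_WORLD

def split(n_particles):
    rank, size = comm.Get_rank(), comm.Get_size()
    chunk, remainder = divmod(n_particles, size)
    start = rank * chunk + min(rank, remainder)            # first index owned by this rank
    end = start + chunk + (1 if rank < remainder else 0)   # one past the last owned index
    return start, end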

Example #2
def build_mlp_model(input_shape):
    # Single-hidden-layer dense embedding over a flattened feature vector
    base_input = Input((input_shape, ))
    x = Dense(1200, activation='relu')(base_input)
    embedding_model = Model(base_input, x, name='embedding')
    return embedding_model


feature_data = read.read()

# Top-level keys are the candidate test ids; second-level keys are the class labels
test_ids = list(feature_data.keys())
all_labels = list(feature_data[test_ids[0]].keys())

for test_id in test_ids:
    for a_label in all_labels:
        # Hold out one id for testing and drop one class from the training set
        train_labels = [a for a in all_labels if a != a_label]
        _train_data, _test_data = read.split(feature_data, test_id)
        _train_data = read.remove_class(_train_data, [a_label])

        # Draw a k-shot support set from the held-out test data
        _support_data, _test_data = read.support_set_split(_test_data, k_shot)

        _train_data, _train_labels = read.flatten(_train_data)
        _support_data, _support_labels = read.flatten(_support_data)

        _train_data = np.array(_train_data)
        _support_data = np.array(_support_data)

        _train_labels = np.array(_train_labels)
        _support_labels = np.array(_support_labels)

        base_network = build_mlp_model(feature_length)
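        # Hedged sketch, not the original code: one assumed way to use the shared
        # embedding is a siamese pair that scores support/query similarity via the
        # L1 distance between the two embeddings.
        # Assumes: from keras.layers import Input, Dense, Lambda; from keras import backend as K
        support_in = Input((feature_length, ))
        query_in = Input((feature_length, ))
        distance = Lambda(lambda t: K.abs(t[0] - t[1]))(
            [base_network(support_in), base_network(query_in)])
        score = Dense(1, activation='sigmoid')(distance)
        siamese = Model([support_in, query_in], score)
        siamese.compile(optimizer='adam', loss='binary_crossentropy')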

Example #3
def conv():
    # 1-D convolutional embedding over per-sample feature sequences
    _input = Input(shape=(feature_length, 1))
    x = Conv1D(12, kernel_size=5, activation='relu')(_input)
    x = MaxPooling1D(pool_size=2)(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(1200, activation='relu')(x)
    x = BatchNormalization()(x)
    return Model(inputs=_input, outputs=x, name='embedding')


all_features = read.read()
test_ids = list(all_features.keys())

for test_id in test_ids:
    _train_features, _test_features = read.split(all_features, test_id)

    _train_features, _train_labels = read.flatten(_train_features)
    _test_features, _test_labels = read.flatten(_test_features)

    _train_labels_ = np_utils.to_categorical(_train_labels,
                                             len(read.activity_list))
    _test_labels_ = np_utils.to_categorical(_test_labels,
                                            len(read.activity_list))

    _train_features = np.array(_train_features)
    _train_features = np.reshape(
        _train_features, (_train_features.shape[0],
                          _train_features.shape[1] * _train_features.shape[2]))
    _train_features = np.expand_dims(_train_features, 2)  # add channel axis -> (N, features, 1) for Conv1D
    print(_train_features.shape)
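    # Hedged sketch (an assumption, not part of the original snippet): apply the
    # same reshaping to the test features, then train the conv() embedding with a
    # softmax classification head on top. Optimizer, epochs and batch size are
    # illustrative choices only.
    _test_features = np.array(_test_features)
    _test_features = np.reshape(
        _test_features, (_test_features.shape[0],
                         _test_features.shape[1] * _test_features.shape[2]))
    _test_features = np.expand_dims(_test_features, 2)

    embedding = conv()
    output = Dense(len(read.activity_list), activation='softmax')(embedding.output)
    classifier = Model(inputs=embedding.input, outputs=output)
    classifier.compile(optimizer='adam', loss='categorical_crossentropy',
                       metrics=['accuracy'])
    classifier.fit(_train_features, _train_labels_,
                   validation_data=(_test_features, _test_labels_),
                   epochs=10, batch_size=32)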