Example 1
def validate(valn_files):
    """Validates ml against valn_files, a list of log file names.

    Returns indices of correct predictions.
    """
    locsv, valsv, labelsv = rfutils.read_data(valn_files, poscount, locidx)
    # Predicted probability of the positive class for each sample.
    pred = ml.predict([locsv, valsv])[:, 0]
    # Threshold at 0.7 and return the indices where the thresholded
    # prediction matches the ground-truth label.
    return ((pred > 0.7) == labelsv).nonzero()[0]
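
A sketch of how the returned indices might be turned into an accuracy figure; val_files is a hypothetical list of held-out log file names, not something defined above:

val_files = ['held-out-1.log', 'held-out-2.log']  # hypothetical names
correct_idx = validate(val_files)
_, _, labels_v = rfutils.read_data(val_files, poscount, locidx)
print('accuracy: %.3f' % (float(len(correct_idx)) / len(labels_v)))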
Example 2
import keras.backend as K
from keras.constraints import min_max_norm
from keras.layers import (BatchNormalization, Conv1D, Dense, Dropout,
                          Embedding, Flatten, Input, MaxPooling1D,
                          concatenate)
from keras.losses import binary_crossentropy
from keras.models import Model
from keras.optimizers import Adam

import rfutils

# poscount, embedding_dim, dropout_prob, num_filters, filter_sizes,
# hidden_dims, locidx and gl are assumed to be defined elsewhere in
# the program.
K.set_floatx('float64')

in_vals = Input((poscount, 1), name='vals', dtype='float64')
normd = BatchNormalization(axis=1, gamma_constraint=min_max_norm(),
                           beta_constraint=min_max_norm())(in_vals)
in_locs = Input((poscount, ), name='locs', dtype='uint64')
embed_locs = Embedding(locidx.watermark, embedding_dim,
                       input_length=poscount)(in_locs)
merged = concatenate([embed_locs, normd])
drop = Dropout(dropout_prob[0])(merged)
conv_list = []
# One convolution branch per filter size; each branch is max-pooled and
# flattened, then the branches are concatenated.
for filtsz in filter_sizes:
    tmp = Conv1D(num_filters, filtsz, activation='relu')(drop)
    tmp = Flatten()(MaxPooling1D()(tmp))
    conv_list.append(tmp)
out = Dense(1, activation='sigmoid')(Dense(hidden_dims, activation='relu')(
    Dropout(dropout_prob[1])(concatenate(conv_list))))
ml = Model(inputs=[in_locs, in_vals], outputs=out)
ml.compile(Adam(lr=0.01), metrics=['acc'], loss=binary_crossentropy)
# gl lists the training log files (same format as valn_files in validate).
locs, vals, labels = rfutils.read_data(gl, poscount, locidx)


def fit(eps, bsz):
    """Trains ml for eps epochs with batches of size bsz."""
    ml.fit([locs, vals], labels, batch_size=bsz, epochs=eps)


def validate(valn_files):
    """Validates ml against valn_files, a list of log file names.

    Returns indices of correct predictions.
    """
    locsv, valsv, labelsv = rfutils.read_data(valn_files, poscount, locidx)
    pred = ml.predict([locsv, valsv])[:, 0]
    return ((pred > 0.7) == labelsv).nonzero()[0]
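
All of these listings lean on rfutils.read_data, whose body is not shown. Purely as a sketch of the interface implied by the call sites (location indices shaped (n, poscount), values shaped (n, poscount, 1), binary labels), a stand-in could look like this; the random contents and the name read_data_stub are illustrative, not part of the original:

import numpy as np

def read_data_stub(files, poscount, locidx):
    # Stand-in that only mimics the shapes the models above expect;
    # the real rfutils.read_data parses the given log files.
    n = 128 * len(files)  # arbitrary sample count for the sketch
    locs = np.random.randint(0, locidx.watermark, size=(n, poscount))
    vals = np.random.rand(n, poscount, 1)
    labels = np.random.randint(0, 2, size=n)
    return locs, vals, labels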
Example 3
import sys

import keras.backend as K
from keras.constraints import min_max_norm
from keras.layers import (BatchNormalization, Dense, Dropout, Embedding,
                          Flatten, Input, concatenate, multiply)
from keras.losses import mse
from keras.models import Model

import rfutils

# poscount, embedding_dim, dense_count, dropout_prob, optr (the
# optimizer), locidx and gl are assumed to be defined elsewhere in
# the program.
K.set_floatx('float64')

in_vals = Input((poscount, 1), name='vals', dtype='float64')
normd = BatchNormalization(
    axis=1, gamma_constraint=min_max_norm(),
    beta_constraint=min_max_norm())(in_vals)
in_locs = Input((poscount, ), name='locs', dtype='uint64')
embed_locs = Embedding(
    locidx.watermark, embedding_dim, input_length=poscount)(in_locs)
merged = concatenate([embed_locs, normd])
dense_list = []
# dense_count independent sigmoid units over the flattened features;
# their outputs are combined by elementwise multiplication.
for i in range(dense_count):
    dense_list.append(
        Dropout(dropout_prob)(Dense(1, activation='sigmoid')(Flatten()(
            merged))))
mult = multiply(dense_list)
ml = Model(inputs=[in_locs, in_vals], outputs=mult)
ml.compile(optr, metrics=['acc'], loss=mse)

locs, vals, labels = rfutils.read_data(gl, poscount, locidx)


def fit(
        # Epoch count and batch size can be given on the command line.
        eps=int(sys.argv[1]) if len(sys.argv) > 1 else 1,
        # Large batches tend to cause NaNs in batch normalization.
        bsz=int(sys.argv[2]) if len(sys.argv) > 2 else 50):
    ml.fit([locs, vals], labels, batch_size=bsz, epochs=eps)


fit()
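
Nothing in this listing evaluates the trained model. Assuming the validate helper from Example 1 is also present in the program, a follow-up along these lines would close the loop; val_files is again a hypothetical list of held-out log files:

correct_idx = validate(val_files)
print('%d validation predictions matched their labels' % len(correct_idx))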