Example No. 1
def train(train_ncs, predict_ncs, gensim_w2v_model, config):
    # Prepare batches
    X, Y = get_vectors(train_ncs, gensim_w2v_model, config)
    inp_batches, tar_batches = create_batch(
        X, Y, int(config['TRAINING']['BATCH_SIZE']))

    # Set up model and optimization
    # Input size depends on whether polynomial feature expansion is used
    poly_degree = int(config['GENERAL']['POLY_DEGREE'])
    if poly_degree == 1:
        input_size = X.shape[1]
    else:
        input_size = get_poly_features(inp_batches[0], poly_degree).shape[1]
    output_size = Y.shape[1]
    model = torch.nn.Linear(input_size, output_size)
    if use_cuda:
        model = model.cuda()
    optimizer = optim.SGD(model.parameters(),
                          lr=float(config['TRAINING']['LEARNING_RATE']))
    criterion = torch.nn.SmoothL1Loss()

    # Train
    logging.info(
        'Training can be stopped with Ctrl+C at any time; the program will then continue with evaluation.'
    )
    num_epochs = int(config['TRAINING']['NUM_EPOCHS'])
    try:
        for ep in range(0, num_epochs):
            epoch_loss = train_epoch(inp_batches, tar_batches, model,
                                     optimizer, criterion, config)
            logging.info('epoch ' + str(ep) + '\tloss ' + str(epoch_loss))
    except KeyboardInterrupt:
        pass
    return model, criterion
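
All of the examples on this page rely on a project-specific create_batch helper that is not shown. Below is a minimal sketch of what the (X, Y, batch_size) variant used in Examples 1, 2 and 5 might look like, assuming it only reshapes the feature and target matrices into fixed-size numpy batches; this is an assumption, not the original implementation.

import numpy as np

def create_batch(X, Y, batch_size):
    # Assumed sketch: drop the trailing examples that do not fill a complete
    # batch, then reshape into (num_batches, batch_size, dim) numpy arrays so
    # that inp_batches[i] and tar_batches[i] are 2-D arrays as used above.
    num_batches = X.shape[0] // batch_size
    X = X[:num_batches * batch_size]
    Y = Y[:num_batches * batch_size]
    inp_batches = X.reshape(num_batches, batch_size, X.shape[1])
    tar_batches = Y.reshape(num_batches, batch_size, Y.shape[1])
    return inp_batches, tar_batches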
Example No. 2
def predict_batch(ncs, gensim_w2v_model, model, criterion, config):

    # Prepare batches
    output_ncs = []
    output_scores = []
    start_index = 0

    X, Y = get_vectors(ncs, gensim_w2v_model, config)
    inp_batches, tar_batches = create_batch(
        X, Y, int(config['TRAINING']['BATCH_SIZE']))

    # Override the passed-in criterion with an unreduced loss so per-element
    # scores are kept for every batch (reduce/size_average are deprecated args)
    criterion = torch.nn.SmoothL1Loss(reduction='none')
    logging.info('Scoring batches')
    for i in tqdm.tqdm(range(0, inp_batches.shape[0])):
        Y = tar_batches[i]
        if int(config['GENERAL']['POLY_DEGREE']) > 1:
            X = get_poly_features(inp_batches[i],
                                  int(config['GENERAL']['POLY_DEGREE']))
        else:
            X = inp_batches[i]

        inp = torch.from_numpy(X)
        tar = torch.from_numpy(Y)

        if use_cuda:
            inp = inp.cuda()
            tar = tar.cuda()

        out = model(inp.float())
        loss = criterion(out.float(), tar.float())
        end_index = start_index + int(config['TRAINING']['BATCH_SIZE'])

        output_ncs.extend(ncs[start_index:end_index])
        output_scores.extend(loss.tolist())
        start_index += int(config['TRAINING']['BATCH_SIZE'])

    return output_ncs, output_scores
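
A hedged sketch of how Examples 1 and 2 might be wired together. The config sections match the keys read above; the file names are hypothetical placeholders, and read_ncs is assumed to come from the surrounding project (it appears in Example 5).

import configparser
import gensim

config = configparser.ConfigParser()
config.read('config.ini')  # hypothetical config file defining the GENERAL and TRAINING keys

# hypothetical inputs: a pre-trained word2vec model and noun-compound lists
gensim_w2v_model = gensim.models.Word2Vec.load('w2v.model')
train_ncs = read_ncs('train_ncs.txt')      # helper assumed from the surrounding project
predict_ncs = read_ncs('predict_ncs.txt')

model, criterion = train(train_ncs, predict_ncs, gensim_w2v_model, config)
scored_ncs, scores = predict_batch(predict_ncs, gensim_w2v_model, model, criterion, config)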
Example No. 3
signal = []  # collected channels; np, window_size and the module f are defined earlier in the script
for i in range(0, 8):
    # read each channel from its binary file into a numpy array
    with open(
            "C:/Users/lalyor/Documents/Masterarbeit/Run_30_min_8/signal_" +
            str(i), "rb") as file:
        arr1 = np.load(file)
    signal += [arr1]

signal = np.array(signal)
train_data = signal[:, 0:(35000 + window_size)]
test_data = signal[:, 35000:(len(signal[0]))]

batch_train_data = f.create_batch(train_data, window_size)
batch_test_data = f.create_batch(test_data, window_size)

batch = f.create_batch(signal, window_size)
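
The f.create_batch used in Examples 3 and 4 has a different signature (data, window_size, optionally pred). Its real behaviour is not shown on this page; the following is only a rough sketch of one plausible non-overlapping windowing scheme, with the pred=1 branch treated as a simple pass-through for the per-window label lists. All of it is an assumption.

import numpy as np

def create_batch(data, window_size, pred=0):
    # Assumed sketch only. pred=0: cut each signal channel into consecutive,
    # non-overlapping windows of length window_size, giving an array of shape
    # (num_windows, channels, window_size). pred=1: data is already a flat
    # list of per-window labels and is returned as an array unchanged.
    if pred:
        return np.array(data)
    data = np.asarray(data)
    num_windows = data.shape[1] // window_size
    windows = [data[:, j * window_size:(j + 1) * window_size]
               for j in range(num_windows)]
    return np.array(windows)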

######################################################################################################################
angriff_rename_attack_1 = []
angriff_rename_attack_2 = []

for i in range(0, 8):
    # read the "silver" signal for each channel into a numpy array
    with open(
            "C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Rename_attack/signal_silver"
            + str(i), "rb") as file:
        arr1 = np.load(file)
    angriff_rename_attack_1 += [arr1]
Example No. 4
    # close the file opened in the part of the loop not shown above
    file.close()
    # read the "process on" signal for the same channel into a numpy array
    with open(
            "C:/Users/lalyor/Documents/Masterarbeit/Angriff_8/Rename_attack/signal_silver_process_on"
            + str(i), "rb") as file:
        arr1 = np.load(file)
    angriff_rename_attack_2 += [arr1]

angriff_rename_attack_1 = np.array(angriff_rename_attack_1)
angriff_rename_attack_2 = np.array(angriff_rename_attack_2)

batch_angriff_rename_attack_1 = f.create_batch(angriff_rename_attack_1,
                                               window_size)
batch_angriff_rename_attack_2 = f.create_batch(angriff_rename_attack_2,
                                               window_size)

pred_rename_attack_1 = [1 for i in range(990)]  # Rename attack: 1 = normal, -1 = attack
for i in range(113, 990):
    pred_rename_attack_1[i] = -1

pred_rename_attack_2 = [1 for i in range(990)]  # Rename attack: 1 = normal, -1 = attack
for i in range(267, 990):
    pred_rename_attack_2[i] = -1

batch_pred_rename_attack_1 = f.create_batch(pred_rename_attack_1,
                                            window_size,
                                            pred=1)
batch_pred_rename_attack_2 = f.create_batch(pred_rename_attack_2,
                                            window_size,
                                            pred=1)
Example No. 5
    eval_ncs, eval_scores, eval_scores_inv = read_eval(args)

    logging.info('Calculating additive score')
    additive_score = weighted_add_score(eval_ncs, gensim_w2v_model)
    write_score(eval_ncs, additive_score, args.p2out + 'additive_scores.csv')

    logging.info('Reading train set')
    ncs = read_ncs(args.p2tc)

    logging.info('Creating vector for training instances')
    X, Y = get_vectors(ncs, gensim_w2v_model)
    if model_config.poly_degree > 1:
        X = get_poly_features(X, model_config.poly_degree)

    logging.info('Creating batches')
    in_batches, tar_batches = create_batch(X, Y, model_config.batch_size)

    logging.info('Creating the regression model')
    model, optimizer, criterion = build_model(X, Y)

    logging.info('Training')
    train(in_batches, tar_batches, model, model_config.nb_epochs, optimizer,
          criterion)

    logging.info('Calculating regression-based scores')
    reg_score = regression_score(eval_ncs, gensim_w2v_model, model)
    write_score(eval_ncs, reg_score, args.p2out + 'reg_scores.csv')

    print('Spearman rho bet. inv human score and regression',
          scipy.stats.spearmanr(reg_score, eval_scores_inv))
    print('Spearman rho bet. inv human score and additive score',
          scipy.stats.spearmanr(additive_score, eval_scores_inv))