def runClassificationExperimentKPartition(foldSize, start, maxK, step):

    print('run experiments for ' + Config.NETWORK)

    allFoldK = pickle.load(open('allFold_2019.pickle', "rb"))
    allTestData = pickle.load(open('allTestData_2019.pickle', "rb"))
    allSentences = pickle.load(open('sentences_2019.pickle', "rb"))

    sentences_Secion = pickle.load(
        open('sentences_Secion_Label_2019.pickle', "rb"))
    sentences_Secion.columns = ['id', 'section']

    for k in range(start, maxK + 1, step):
        for f in range(foldSize):
            print("K = ", k, " F = ", f)
            if os.path.isfile("results_" + str(k) + "_" + str(f) + "_TEST" +
                              str(Config.TEST) + ".pickle"):
                print(
                    'found ', "results_" + str(k) + "_" + str(f) + "_TEST" +
                    str(Config.TEST) + ".pickle")
            else:
                [training_df, classes] = getTrainingData(allFoldK[k], f)

                test_df = allTestData[f]
                sentences = allSentences[allSentences['id'].isin(
                    training_df['SentenceId'])]
                training_df = training_df.rename(columns={'SentenceId': 'id'})
                test_df = test_df.rename(columns={'SentenceId': 'id'})
                training_df = pd.merge(training_df, sentences, on='id')
                training_df = pd.merge(training_df, sentences_Secion, on='id')
                test_df = pd.merge(test_df, sentences_Secion, on='id')
                section_classes = None
                if Config.USE_SECTION:
                    section_classes = np.unique(
                        training_df['section'].tolist() +
                        test_df['section'].tolist())

                training_df, validation_df = train_test_split(
                    training_df,
                    test_size=Config.TRAIN_TEST_RATIO,
                    stratify=training_df[classes].values,
                    random_state=42)

                dataset = Dataset(training_df,
                                  validation_df,
                                  test_df,
                                  classes,
                                  section_classes,
                                  k,
                                  preTrain=True)

                model = Network()
                if os.path.isfile("model_" + str(k) + "_" + str(f) + ".h5"):
                    model.loadModel("model_" + str(k) + "_" + str(f) + ".h5")
                else:
                    model.build(dataset)
                    model.run(dataset)
                    model.saveModel("model_" + str(k) + "_" + str(f) + ".h5")

                ids, preds = model.predict(dataset)
                results = []
                for i, pred in enumerate(preds):
                    results.append([
                        'HAN', k, f, dataset.X_test[i][0], pred,
                        dataset.get_Y_test()[i],
                        datetime.datetime.fromtimestamp(
                            time.time()).strftime('%Y-%m-%d %H:%M:%S')
                    ])

                pickle.dump(
                    results,
                    open(
                        "results_" + str(k) + "_" + str(f) + "_TEST" +
                        str(Config.TEST) + ".pickle", "wb"))
                weights = model.getActivationWeights(dataset)
                pickle.dump(
                    weights,
                    open(
                        "weights_" + str(k) + "_" + str(f) + "_TEST" +
                        str(Config.TEST) + ".pickle", "wb"))
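# The per-fold result pickles written above can be gathered into one table for
# analysis. A minimal, hypothetical aggregation helper (not part of the original
# script); the column names follow the per-prediction list built above.
import glob
import pickle

import pandas as pd


def collect_results(pattern="results_*_TEST*.pickle"):
    """Load every per-fold results pickle and stack the rows into a DataFrame."""
    rows = []
    for path in glob.glob(pattern):
        with open(path, "rb") as fh:
            rows.extend(pickle.load(fh))
    columns = ["network", "k", "fold", "sentence_id",
               "prediction", "label", "timestamp"]
    return pd.DataFrame(rows, columns=columns)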
Example #2
        d_loss = K.mean(x_real_score - x_fake_ng_score)
        g_loss = K.mean(x_fake_score - x_fake_ng_score)
        return K.mean(grad_pen + d_loss + g_loss)

if __name__ == '__main__':
    batch_size = 128
    init_lr = 1e-5
    img_size = (28, 28, 1)
    dst_img_size = (140, 140)
    latent_dim = 100

    (X_train, Y_train), _ = get_mnist()
    X_train = X_train[Y_train == 8]
    X_train = X_train.astype('float32') / 127.5 - 1
    X_train = np.expand_dims(X_train, 3)
    dataset = Dataset(X_train)
    generator = data_generator(dataset, batch_size=batch_size, shuffle=True)

    d_input = Input(shape=img_size, dtype='float32')
    d_out = discriminator_model(d_input)
    d_model = Model(d_input, d_out)

    g_input = Input(shape=(latent_dim, ), dtype='float32')
    g_out = generator_model(g_input)
    g_model = Model(g_input, g_out)

    x_in = Input(shape=img_size, dtype='float32')
    z_in = Input(shape=(latent_dim,), dtype='float32')

    x_real = x_in
    x_fake = g_model(z_in)
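# Quick self-contained check of the pixel scaling used above:
# x / 127.5 - 1 maps uint8 values in [0, 255] onto [-1, 1], matching a
# tanh-activated generator output.
import numpy as np

pixels = np.array([0.0, 127.5, 255.0], dtype='float32')
scaled = pixels / 127.5 - 1       # -> [-1.,  0.,  1.]
restored = (scaled + 1) * 127.5   # -> [  0., 127.5, 255.]
print(scaled, restored)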
Example #3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from Dataset import Dataset
from GazeGAN import Gaze_GAN
from config.train_options import TrainOptions

opt = TrainOptions().parse()

os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu_id)

if __name__ == "__main__":

    dataset = Dataset(opt)
    gaze_gan = Gaze_GAN(dataset, opt)
    gaze_gan.build_model()
    gaze_gan.test()
Example #4
                     "-1_flagged_StarsMasses_grid.npy")

pos = list(np.arange(0, 1024, 32))
ranges = list(product(pos, repeat=3))
train_data, val_data, test_data = [], [], []
for i in ranges:
    if i[0] <= 416 and i[1] <= 416:  # 14*14*32
        val_data.append(i)
    elif i[0] >= 448 and i[1] >= 448 and i[2] >= 448:  # 18*18*18
        test_data.append(i)
    else:
        train_data.append(i)

data_set = Dataset(val_data,
                   phase=phase,
                   flipBox=True,
                   dm_box=dm_box,
                   ng_box=ng_box,
                   gm_box=gm_box)  # use validation set
params = {"batch_size": 16, "shuffle": True, "num_workers": 20}
generator = data.DataLoader(data_set, **params)

pred_ng = []
tar_ng = []

model.eval()
with torch.no_grad():
    inbox_ranges = list(product(list(np.arange(0, 32, n)), repeat=3))
    for i, (input, target) in enumerate(generator):
        input = input.to(device).float()
        target = target.to(device).float()
        if round == True:
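# The excerpt breaks off inside the evaluation loop. Separately, a small
# self-contained check of the grid split built earlier in this example:
# 14*14*32 = 6272 validation cells, 18**3 = 5832 test cells, the rest train.
from itertools import product

pos = list(range(0, 1024, 32))
val = [i for i in product(pos, repeat=3) if i[0] <= 416 and i[1] <= 416]
test = [i for i in product(pos, repeat=3)
        if i[0] >= 448 and i[1] >= 448 and i[2] >= 448]
print(len(val), len(test), 32 ** 3 - len(val) - len(test))  # 6272 5832 20664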
Example #5
    def train(self, sess, x, y_, accuracy, train_step, train_feed_dict,
              test_feed_dict):

        # To view graph: tensorboard --logdir=/Users/ryanzotti/Documents/repos/Self_Driving_RC_Car/tf_visual_data/runs
        tf.scalar_summary('accuracy', accuracy)
        merged = tf.merge_all_summaries()

        tfboard_basedir = '/Users/ryanzotti/Documents/repos/Self_Driving_RC_Car/tf_visual_data/runs/'
        tfboard_run_dir = mkdir_tfboard_run_dir(tfboard_basedir)

        # Archive this script to document model design in event of good results that need to be replicated
        model_file_path = os.path.dirname(
            os.path.realpath(__file__)) + '/' + os.path.basename(__file__)
        cmd = 'cp {model_file} {archive_path}'
        shell_command(
            cmd.format(model_file=model_file_path,
                       archive_path=tfboard_run_dir + '/'))

        sess.run(tf.initialize_all_variables())

        input_file_path = os.path.join(self.data_path, 'data')
        dataset = Dataset(input_file_path=input_file_path,
                          max_sample_records=self.max_sample_records)

        # Full-trace run options and metadata let sess.run record runtime stats (e.g. for TensorBoard profiling)
        run_opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_opts_metadata = tf.RunMetadata()

        train_images, train_labels = process_data(
            dataset.get_sample(train=True))
        train_feed_dict[x] = train_images
        train_feed_dict[y_] = train_labels
        train_summary, train_accuracy = sess.run(
            [merged, accuracy],
            feed_dict=train_feed_dict,
            options=run_opts,
            run_metadata=run_opts_metadata)
        test_images, test_labels = process_data(
            dataset.get_sample(train=False))
        test_feed_dict[x] = test_images
        test_feed_dict[y_] = test_labels
        test_summary, test_accuracy = sess.run([merged, accuracy],
                                               feed_dict=test_feed_dict,
                                               options=run_opts,
                                               run_metadata=run_opts_metadata)
        message = "epoch: {0}, training accuracy: {1}, validation accuracy: {2}"
        print(message.format(-1, train_accuracy, test_accuracy))

        for epoch in range(self.epochs):
            train_batches = dataset.get_batches(train=True)
            for batch in train_batches:
                images, labels = process_data(batch)
                train_feed_dict[x] = images
                train_feed_dict[y_] = labels
                train_step.run(feed_dict=train_feed_dict)

            # TODO: remove all this hideous boilerplate
            run_opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_opts_metadata = tf.RunMetadata()
            train_images, train_labels = process_data(
                dataset.get_sample(train=True))
            train_feed_dict[x] = train_images
            train_feed_dict[y_] = train_labels
            train_summary, train_accuracy = sess.run(
                [merged, accuracy],
                feed_dict=train_feed_dict,
                options=run_opts,
                run_metadata=run_opts_metadata)
            test_images, test_labels = process_data(
                dataset.get_sample(train=False))
            test_feed_dict[x] = test_images
            test_feed_dict[y_] = test_labels
            test_summary, test_accuracy = sess.run(
                [merged, accuracy],
                feed_dict=test_feed_dict,
                options=run_opts,
                run_metadata=run_opts_metadata)
            print(message.format(epoch, train_accuracy, test_accuracy))

        # Save the trained model to a file
        saver = tf.train.Saver()
        save_path = saver.save(sess, tfboard_run_dir + "/model.ckpt")

        # Marks unambiguous successful completion to prevent deletion by cleanup script
        shell_command('touch ' + tfboard_run_dir + '/SUCCESS')
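# mkdir_tfboard_run_dir and shell_command are called above but not shown in this
# excerpt; a minimal sketch of plausible implementations (assumptions, not the
# original helpers).
import os
import subprocess


def mkdir_tfboard_run_dir(base_dir):
    """Create and return a fresh, numbered run directory under base_dir."""
    os.makedirs(base_dir, exist_ok=True)
    run_dir = os.path.join(base_dir, 'run_{}'.format(len(os.listdir(base_dir)) + 1))
    os.makedirs(run_dir)
    return run_dir


def shell_command(cmd):
    """Run a shell command string and return its output."""
    return subprocess.check_output(cmd, shell=True)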
Example #6
    ('LES', {
        'ncfile':
        '/Users/romainroehrig/data/LES/AYOTTE/AYOTTE{0}_LES_MESONH_RR.nc'.
        format(subcase),
        'line':
        'k'
    }),
    ('LES_csam', {
        'ncfile':
        '/Users/romainroehrig/data/LES/AYOTTE/AYOTTE{0}_LES_MESONH_RR_csam.nc'.
        format(subcase),
        'line':
        'k.'
    }),
])

references = []
for ref in tmp.keys():
    references.append(
        Dataset(name=ref,
                case='AYOTTE',
                subcase=subcase,
                ncfile=tmp[ref]['ncfile'],
                line=tmp[ref]['line']))

########################################
# Configuration file for AYOTTE/24SC atlas
########################################

diagnostics = config.diagnostics
Example #7
import GMFlogistic

model_file = "pretrain/ml-1m_GMF_10_neg_1_hr_0.6470_ndcg_0.3714.h5"
dataset_name = "ml-1m"
mf_dim = 64
layers = [512, 256, 128, 64]

reg_layers = [0, 0, 0, 0]
reg_mf = 0

num_factors = 10
regs = [0, 0]

# Loading data
t1 = time()
dataset = Dataset("Data/" + dataset_name)
train, testRatings, testNegatives = dataset.trainMatrix, dataset.testRatings, dataset.testNegatives
num_users, num_items = train.shape
print("Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d" %
      (time() - t1, num_users, num_items, train.nnz, len(testRatings)))

# Get model
model = GMFlogistic.get_model(num_items=num_items,
                              num_users=num_users,
                              latent_dim=num_factors,
                              regs=regs)
model.load_weights(model_file)

# Evaluate performance
print("K\tHR\tNDCG")
for topK in range(1, 10):
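# The per-K loop is truncated here. For reference, the usual leave-one-out
# metrics are: hit ratio = 1 if the held-out item is in the top-K list, and
# NDCG = log(2) / log(rank + 2) with a zero-based rank. A self-contained sketch
# (not the repository's own evaluate code):
import math


def hit_ratio(ranklist, gt_item):
    return 1.0 if gt_item in ranklist else 0.0


def ndcg(ranklist, gt_item):
    if gt_item in ranklist:
        return math.log(2) / math.log(ranklist.index(gt_item) + 2)
    return 0.0


print(hit_ratio([5, 9, 2], 9), ndcg([5, 9, 2], 9))  # 1.0 0.6309...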
Example #8
def train(config):
    if not os.path.exists(config.model_save_path):
        os.makedirs(config.model_save_path)
    if config.use_window:
        dataset = DatasetWindow(
            config.train_data_path,
            config.batch_size,
            config.max_len,
            config.is_cuda_available,
            config.window_size,
            True,
            use_net_accumulation=config.use_net_accumulation)
        dev_dataset = Dataset(config.dev_data_path,
                              1,
                              config.max_len,
                              config.is_cuda_available,
                              False,
                              use_net_accumulation=config.use_net_accumulation)
        # dataset = DatasetWindow(config.test_data_path, config.batch_size, config.max_len, config.is_cuda_available,
        #     config.window_size, False, use_net_accumulation=config.use_net_accumulation)
    else:
        dataset = Dataset(config.train_data_path,
                          config.batch_size,
                          config.max_len,
                          config.is_cuda_available,
                          True,
                          use_net_accumulation=config.use_net_accumulation)
        dev_dataset = Dataset(config.dev_data_path,
                              config.batch_size,
                              config.max_len,
                              config.is_cuda_available,
                              False,
                              use_net_accumulation=config.use_net_accumulation)
    loss_function = MaskedMSELoss(config.is_cuda_available, config.use_window)
    logger = config.logger
    if config.model_type not in ('rnnda', 'rnnda-att1', 'rnnda-att2', 'rnnda-att'):
        model = model_dict[config.model_type](6, config.hidden_dim,
                                              config.dropout_rate,
                                              config.use_window)
    else:
        model = model_dict[config.model_type](6, config.hidden_dim,
                                              config.window_size)
    if config.continue_train:
        model.load_state_dict(
            torch.load(config.model_save_path + '/' + config.prev_model_name))
    if config.is_cuda_available:
        model = model.cuda()
    optimizer_dict = {'sgd': optim.SGD, 'adam': optim.Adam}
    optim_func = optimizer_dict[config.optimizer_type]
    optimizer = optim_func(model.parameters(), lr=config.learning_rate)
    eval_rmse_loss_function = MaskedRMSELoss(config.is_cuda_available,
                                             use_window=True)
    eval_mae_loss_function = MaskedMAEAndMAPELoss(config.is_cuda_available,
                                                  use_window=True)
    epoch_num = config.epoch_num
    logger_interval = config.logger_interval
    prev_best_loss = 10000
    logger_count, validation_count = 0, 0
    batch_count = 1
    for epoch in range(epoch_num):
        logger.info('===================')
        logger.info('Start Epoch %d', epoch)
        loss_sum = 0
        dataset.new_epoch()

        # objgraph.show_growth()
        for d in tqdm(dataset.generate_batches()):
            # objgraph.show_growth()
            if config.dynamic_lr and batch_count % config.lr_change_batch_interval == 0:
                adjust_learning_rate(config, optimizer, logger)
            model.train()
            train_seq = d[0]
            label = d[1]
            mask = d[2]
            if train_seq is None:
                continue
            optimizer.zero_grad()
            pred = model(train_seq)
            cur_batch_size = train_seq.batch_sizes[0].item()
            logger_count += cur_batch_size
            validation_count += cur_batch_size
            # loss = nn.functional.mse_loss(pred, label)
            loss = loss_function(pred,
                                 label,
                                 mask,
                                 use_window=config.use_window)
            loss.backward()
            optimizer.step()
            loss_sum += loss.data.cpu().numpy()
            batch_count += 1
            logger_count, loss_sum, batch_count = _log_information(
                logger, logger_count, logger_interval, loss_sum, batch_count)
            prev_best_loss, validation_count = _validation(
                config, logger, validation_count, dev_dataset, model,
                loss_function, eval_rmse_loss_function, eval_mae_loss_function,
                prev_best_loss)
        logger.info('Current loss is %.5f', loss_sum / batch_count)
        logger.info('End Epoch %d', epoch)
        logger.info('=====================')
        logger.info('\n\n')
        torch.save(model.state_dict(),
                   config.model_save_path + 'epoch' + str(epoch))
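# adjust_learning_rate is called every lr_change_batch_interval batches but is
# not defined in this excerpt; a minimal sketch of one common implementation
# (an assumption, not the original helper).
def adjust_learning_rate(config, optimizer, logger, decay=0.9):
    """Multiply the learning rate of every parameter group by a decay factor."""
    for param_group in optimizer.param_groups:
        param_group['lr'] *= decay
    logger.info('Learning rate adjusted to %f', optimizer.param_groups[0]['lr'])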
Example #9
                'rating': predictions_unwrapped
            })
        dataframe = dataframe.sort_values(by=['rating'], ascending=[False])

        self.log("{} best items for user#{}:\n{}".format(
            num_items, user, dataframe[:num_items]))

        return dataframe


if __name__ == "__main__":
    preprocessor = DataPreproccessor(
        raw_folder_path=AllOptions.DataOptions.raw_folder_path,
        cache_folder_path=AllOptions.DataOptions.cache_folder_path,
        csv_file_name='Modified_Video_Games_5')
    dataset = Dataset(preprocessor, reconstruct_files=False)

    user_item_recommender = UserItemRecommender(dataset=dataset)

    create_new_model = False
    if create_new_model:
        # Calling user_item_recommender('weights_020_0.73loss.hdf5') here would be
        # equivalent to running the build/train steps below

        # Build and train the model
        user_item_recommender.build_model()
        training_history = user_item_recommender.train()

        # Evaluate the trained model
        user_item_recommender.evaluate_model()
Example #10
        x_1 = F.relu(self.conv4(x))
        
        x_concat = torch.cat((x,x_1),1) 
        x_2 = F.relu(self.conv5(x_1))
        
        x_concat = torch.cat((x_concat,x_2),1)
        x = self.conv6(x_concat)
        
        return x

######################################################################################################################
if learning_on == 1:
    input_signal = batch_train_data[0:(len(batch_train_data)-window_size)]
    prediction_signal = batch_train_data[window_size:len(batch_train_data)]
        
    training_set = Dataset(input_signal, prediction_signal)  #Create the Dataset from the lists
    trainloader = torch.utils.data.DataLoader(training_set, batch_size=1, shuffle=False)  # Wrap the dataset in a DataLoader (batch_size=1, no shuffling)
    
    net = Net()                   #Create a new network
    
    if torch.cuda.device_count() > 1:  # Use multiple GPUs when more than one is available
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        net = nn.DataParallel(net)
    
    # net.to(device)
    criterion = nn.MSELoss()   
    # criterion_GPU = criterion.to(device)
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    
    epoch_loss_array = []
    count_epoch = 0
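# The excerpt stops before the training loop that would consume trainloader,
# criterion and optimizer. A generic, self-contained PyTorch loop of the same
# shape (MSELoss + SGD), shown with a placeholder model and random data rather
# than the network and signals above:
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset

toy_set = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
toy_loader = DataLoader(toy_set, batch_size=8, shuffle=False)
toy_net = nn.Linear(4, 1)
toy_criterion = nn.MSELoss()
toy_optimizer = optim.SGD(toy_net.parameters(), lr=0.001, momentum=0.9)

for epoch in range(2):
    epoch_loss = 0.0
    for inputs, targets in toy_loader:
        toy_optimizer.zero_grad()
        loss = toy_criterion(toy_net(inputs), targets)
        loss.backward()
        toy_optimizer.step()
        epoch_loss += loss.item()
    print('epoch', epoch, 'mean loss', epoch_loss / len(toy_loader))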
Example #11
# Edits the current detections
from TypeOneDetections import TypeOneDetections
from Dataset import Dataset

dataset = Dataset()
dataset.detect_type_ones()
Example #12
    init_logging_and_result(args)

    print('--- data generation start ---')
    data_gen_begin = time()

    if args.dataset == 'ali':  # TODO(wgcn96): switch the dataset path
        path = '/home/wangchen/multi-behavior/ijaci15/sample_version_one/'
        args.b_num = 3
    else:  # TODO(wgcn96): 'beibei'
        path = '/home/wangchen/multi-behavior/beibei/sample_version_one/'
        args.b_num = 2
        # enable b_2_type ...
        args.b_2_type = 'vb'

    if ('BPR' in args.model) or (args.en_MC == 'yes') or (args.model == 'FISM'):
        dataset_all = Dataset(path=path, load_type='dict')
    else:
        dataset_ipv = Dataset(path=path, b_type='ipv')
        dataset_cart = Dataset(path=path, b_type='cart')
        dataset_buy = Dataset(path=path, b_type='buy')
        dataset_all = (dataset_ipv, dataset_cart, dataset_buy)

    print('data generation [%.1f s]' % (time() - data_gen_begin))

    if args.model == 'pure_GMF':
        if args.en_MC == 'yes':
            dataset = dataset_all
        else:
            dataset = dataset_buy
        model = pure_GMF(dataset.num_users, dataset.num_items, args)
        print('num_users:%d   num_items:%d' % (dataset.num_users, dataset.num_items))
Example #13
    tf = {i: j for i, j in tf.items() if j >= 2}
    token2id = {key: i + 1 for i, key in enumerate(tf.keys())}
    token2id['non'] = 0
    id2token = ['non'] + list(tf.keys())

    X_train = [str2id(x, token2id) for x in X_train]
    Y_train = [str2id(y, token2id) for y in Y_train]
    X_val = [str2id(x, token2id) for x in X_val]
    Y_val = [str2id(y, token2id) for y in Y_val]

    X_train = sequence_padding(X_train, max_length=max_length)
    Y_train = sequence_padding(Y_train, max_length=max_length)
    X_val = sequence_padding(X_val, max_length=max_length)
    Y_val = sequence_padding(Y_val, max_length=max_length)

    train_dataset = Dataset(X_train, Y_train, y_tf=ToOneHot(num_classes))
    val_dataset = Dataset(X_val, Y_val, y_tf=ToOneHot(num_classes))
    train_generator = generator(train_dataset, batch_size=train_batch_size, shuffle=True)
    val_generator = generator(val_dataset, batch_size=val_batch_size, shuffle=False)

    text_input = Input(shape=(max_length, ), name='text_input', dtype='int32')
    y_true = Input(shape=(max_length, num_classes), dtype='float32')
    out = Couplet_Model(text_input, vocab_size, hidden_dim, max_length=max_length)
    out = CrossEntropy(-1)([y_true, out])
    model = Model([y_true, text_input], out)
    model.compile(Adam())

    num_train_batches = math.ceil(len(Y_train) / train_batch_size)
    num_val_batches = math.ceil(len(Y_val) / val_batch_size)

    def evaluate(model, in_s):
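# str2id, sequence_padding and ToOneHot are used above without being shown; a
# minimal sketch of what sequence_padding is assumed to do (pad or truncate each
# id sequence to max_length with the 'non' id 0):
def sequence_padding(sequences, max_length, pad_id=0):
    """Pad (or truncate) every list of token ids to exactly max_length."""
    padded = []
    for seq in sequences:
        seq = list(seq)[:max_length]
        padded.append(seq + [pad_id] * (max_length - len(seq)))
    return padded


print(sequence_padding([[3, 7], [1, 2, 3, 4, 5]], max_length=4))
# [[3, 7, 0, 0], [1, 2, 3, 4]]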
Example #14
if __name__ == '__main__':

    args = parse_args()
    print(args)
    args_str = get_args_to_string(args)
    args.args_str = args_str
    print(args_str)
    print('Data loading...')
    t1, t_init = time(), time()
    #pdb.set_trace()
    if args.method.lower() in ['sorecgatitem']:
        dataset = SocialItem_Dataset(args)
    elif args.method.lower() in ['sorecgatuser']:
        dataset = SocialUser_Dataset(args)
    else:
        dataset = Dataset(args)

    params = Parameters(args, dataset)
    print(
        """Load data done [%.1f s]. #user:%d, #item:%d, #dom:%d, #train:%d, #test:%d, #valid:%d"""
        % (time() - t1, params.num_users, params.num_items, params.num_doms,
           params.num_train_instances, params.num_test_instances,
           params.num_valid_instances))
    print('Method: %s' % (params.method))
    if params.method in ['sorecgatitem', 'sorecgatuser']:
        model = Models(params)
    model.define_model()
    model.define_loss('all')
    print("Model definition completed: in %.2fs" % (time() - t1))

    train_step = get_optimizer(params.learn_rate,
Example #15
    regs = eval(args.regs)
    learner = args.learner
    learning_rate = args.lr
    epochs = args.epochs
    batch_size = args.batch_size
    verbose = args.verbose
    init_logging("log/GMF.log")

    evaluation_threads = 1  #mp.cpu_count()
    logging.info("GMF arguments: %s" % (args))
    model_out_file = 'Pretrain/%s_GMF_%d_%d.h5' % (args.dataset, num_factors,
                                                   time())

    # Loading data
    t1 = time()
    dataset = Dataset(args.ratio)
    train, test = dataset.trainMatrix, dataset.testMatrix
    num_users, num_items = train.shape
    user_traininput, item_traininput, trainlabels = get_train_instances(train)
    user_testinput, item_testinput, testlabels = get_train_instances(test)
    logging.info(
        "Load data done [%.1f s]. #user=%d, #item=%d, #train=%d, #test=%d" %
        (time() - t1, num_users, num_items, train.nnz, test.nnz))

    # Build model
    model = get_model(num_users, num_items, num_factors, regs)
    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate),
                      loss='mean_squared_error')
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate),
Example #16
applianceLabels = dfLabels['Washer Dryer'].tolist()

for train_house in [5]:
    discova = Autoencoder() # Turn off variance for prediction
    discova.construct_model()
    discova.compile_model()
    path = 'weights/ae_0' + str(train_house) + '_washerdryer.hdf5'
    discova.model.load_weights(path)
    print(('Loading weights for House ' + str(train_house)).ljust(40,'.'))

    for test_house in range(1,7):
        load_str = 'Loading test data for House ' + str(test_house)
        print(load_str.ljust(40,'.'))
        applianceLabel = applianceLabels[test_house - 1]
        data = Dataset()
        data.load_house_dataframe(test_house)
        data.add_windows(test_house, '00 mains')
        data.add_windows(test_house, applianceLabel)
        data.add_statistics(test_house)
        print('Predicting'.ljust(40,'.'))
        predRaw = discova.model.predict(data.windows[test_house]['00 mains'])
        print('Aligning by timestep'.ljust(40,'.'))
        predTimestep = data.recover_reverse_diagonals(predRaw)
        print('Taking median'.ljust(40,'.'))
        predMedian = np.median(predTimestep, axis = 1)
        print('Rescaling'.ljust(40,'.'))
        mean = data.means[test_house][applianceLabel]
        std = data.stddevs[test_house][applianceLabel]
        predMedianScaled = predMedian * std + mean
        print('Calculating relative MAE'.ljust(40,'.'))
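# The excerpt ends at the relative MAE step. One common definition is the mean
# absolute error normalised by the mean true load; this is an assumption about
# the metric used here, sketched self-contained:
import numpy as np


def relative_mae(pred, true):
    """Mean absolute error divided by the mean of the true signal."""
    pred, true = np.asarray(pred, dtype=float), np.asarray(true, dtype=float)
    return np.mean(np.abs(pred - true)) / np.mean(true)


print(relative_mae([90, 110], [100, 100]))  # 0.1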
Example #17
    y_hat = np.sign(y_hat - 0.5)  # convert to [-1,+1] space
    return y_hat[:, 0]


# Compute the loss function (Squared Error)
def get_loss_NN(X, y, W, v):
    H = logsig(W @ X.T)  # H[rxm] r nodes, m samples
    y_hat = logsig(H.T @ v)[:, 0]  # y_hat[mx1] m samples
    loss = 0.5 * np.sum((y_hat - y)**2)
    return loss


if __name__ == "__main__":
    # Initialize a dataset
    num_dataset = DATASET
    data = Dataset(num_dataset)  # Retrieve dataset object
    # Initialize IterReg feature
    algo = ALGO
    logger = IterReg(algo, num_dataset)  # Init logger GDSVM

    # Print Header
    print("-- Using dataset {} | ALGO: {} --".format(num_dataset, algo))

    # Expand feature vectors for bias term
    Xb = np.hstack((np.ones((data.X_tr.shape[0], 1)), data.X_tr))

    # Find number of nodes
    NODES = int(NODES)  # Force int
    if NODES == -1:  # Inherit
        try:
            nodes = logger.load_nodes()
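# logsig is used in get_loss_NN but not defined in this excerpt; it is assumed
# to be the logistic sigmoid (the MATLAB-style name for it):
import numpy as np


def logsig(x):
    """Element-wise logistic sigmoid, 1 / (1 + exp(-x))."""
    return 1.0 / (1.0 + np.exp(-x))


print(logsig(np.array([-2.0, 0.0, 2.0])))  # [0.1192 0.5 0.8808]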
Example #18
def main(unused_argv):
    args = parse_args()
    model_save = args.dir_path
    num_epochs = args.epochs
    batch_size = args.batch_size
    mf_dim = args.num_factors
    layers = eval(args.layers)
    reg_layers = eval(args.reg_layers)
    learning_rate = args.lr
    learner = args.learner
    num_train_neg = args.num_train_neg
    num_test_neg = args.num_test_neg
    dataset = Dataset(args.path + args.dataset)
    feature_arr, train, testRatings = dataset.feature_arr, dataset.trainMatrix, dataset.testRatings
    num_users, num_items = train.shape
    seed = args.seed

    params = {
        'num_users': num_users,
        'num_items': num_items,
        'mf_dim': mf_dim,
        'layers': layers,
        'reg_layers': reg_layers,
        'learning_rate': learning_rate,
        'learner': learner,
        'top_number': args.top_number,
        'num_test_neg': num_test_neg
    }
    model = "NEUMF_PLUS_{:02d}node_{:02d}fac_{:02d}trainneg_{:02d}testneg_{:02d}topK_{}dataset_{}".format(
        layers[0], mf_dim, num_train_neg, num_test_neg, args.top_number,
        args.dataset, str(time()))
    # Create the Estimator
    imp_neuMF_plus_model = tf.estimator.Estimator(
        model_fn=get_neuMF_PLUS_model,
        model_dir=model_save + "Models/new/NeuMF_PLUS/" + model,
        params=params)

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "logits_relu"}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                              every_n_iter=50)

    feature_eval, user_eval, item_eval, labels_eval = sample_plus.get_test_negative_instances_ver2(
        train, testRatings, feature_arr, num_test_neg, seed)
    print(item_eval)
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={
            "user_input": user_eval,
            "item_input": item_eval,
            "feature_input": feature_eval
        },
        y=labels_eval,
        batch_size=num_test_neg + 4,
        num_epochs=1,
        shuffle=False)

    for i in range(num_epochs):
        t1 = time()
        feature_input, user_input, item_input, labels = sample_plus.get_train_instances(
            train, feature_arr, num_train_neg)
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            x={
                "user_input": user_input,
                "item_input": item_input,
                "feature_input": feature_input
            },
            y=labels,
            batch_size=batch_size,
            num_epochs=1,
            shuffle=True)

        imp_neuMF_plus_model.train(input_fn=train_input_fn,
                                   steps=40000,
                                   hooks=[logging_hook])
        t2 = time()
        print("Finished training model epoch {} in {:.2f} second".format(
            i, t2 - t1))
        eval_results = imp_neuMF_plus_model.evaluate(input_fn=eval_input_fn)
        print("Finished testing model in {:.2f} second".format(time() - t2))
        print(eval_results)
Example #19
CLASSES = 3
DEVICE = torch.device("cuda")

anchor_generator = AnchorGenerator(sizes=((32, 64),),
                                   aspect_ratios=((0.6, 1.0, 1.6),))
backbone = torchvision.models.vgg19(pretrained=False).features
backbone.out_channels = 512
model = FasterRCNN(
    backbone,
    num_classes=CLASSES,
    rpn_anchor_generator=anchor_generator
)
model.load_state_dict(torch.load('models_new/'+'model_'+str(EPOCH)+'.pth'))
model.to(DEVICE)
model.eval()
test_dataset = Dataset(training=False)
test_loader = torch.utils.data.DataLoader(
    test_dataset,
    batch_size=BATCH_SIZE,
    shuffle=False,
    drop_last=False
)
total, tpp, fpp, fnp, tpn, fpn, fnn = [0, 0, 0, 0, 0, 0, 0]
IOUs = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
count = 0
with torch.no_grad():
    for data, target in test_loader:
        count += 1
        print("{:.2f}".format(count/len(test_loader)*100))
        data = data.to(DEVICE)
        target['boxes'] = target['boxes'].to(DEVICE)
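# The loop above fills an IOUs histogram but the computation is cut off. For
# reference, the textbook intersection-over-union of two axis-aligned boxes
# (not necessarily the author's exact code):
def box_iou(box_a, box_b):
    """IoU of two [x1, y1, x2, y2] boxes."""
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / float(area_a + area_b - inter)


print(box_iou([0, 0, 10, 10], [5, 5, 15, 15]))  # 25 / 175 = 0.142...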
Example #20
                        '--matrix_size',
                        help='set the matrix size when using smooth',
                        required=False,
                        default=4)
    args = vars(parser.parse_args())

    RUN_SINGLE_ITERATION = not args['hyperparamsearch']
    checkpoint_filename = args[
        'checkpoint_file'] if RUN_SINGLE_ITERATION else None
    batch_results_file = args['output_file']
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
    # set the input data size
    INPUT_SIZE = args['INPUT_SIZE']
    matrix_size = args['matrix_size']

    train_dataset = Dataset(args['train_dataset_filename'])
    test_dataset = Dataset(args['test_dataset_filename'])

    exception_counter = 0
    iteration_num = 0

    parameter_names = [
        "learning_rate", "training_iters", "batch_size",
        "l2_regularization_penalty", "dropout_keep_prob", "fc1_n_neurons",
        "fc2_1_n_neurons", "fc2_2_n_neurons", "fc2_3_n_neurons",
        "conv1_kernel", "conv2_kernel", "conv3_kernel", "conv1_filters",
        "conv2_filters", "conv3_filters", "conv1_stride", "conv2_stride",
        "conv3_stride", "pool1_kernel", "pool2_kernel", "pool3_kernel",
        "pool1_stride", "pool2_stride", "pool3_stride"
    ]
    parameters = [
Example #21
def main():
    global audioProcess
    dataset = Dataset()
    
    GPIO.setmode(GPIO.BCM) #use the GPIO numbering
    GPIO.setwarnings(False) # Avoids warning channel is already in use

    button = 18 # GPIO pin 18
    button_led = 17

    GPIO.setup(button, GPIO.IN, pull_up_down=GPIO.PUD_UP) #sets up pin 18 as a button
    GPIO.setup(button_led, GPIO.OUT)
    GPIO.setup(processing_led_1, GPIO.OUT)
    GPIO.setup(processing_led_2, GPIO.OUT)
    GPIO.output(button_led, True)

    i_count = 0 # set up for correct grammar in notification below

    r = sr.Recognizer()
    cont = True
    
    while cont:
        input_state = GPIO.input(button) # primes the button!
        
        if input_state == False:
            
            flashLightsProcess = Process(target=flashLights)
            flashLightsProcess.start()
            if audioProcess:
                try:
                    audioProcess.kill()
                except:
                    pass
                audioProcess = None
            with sr.Microphone() as source:  # the audio source can be a microphone or an audio file
                print("Speak Anything :")
                audio = r.listen(source)
                text = ""
                finalResponse = {}
                finalResponse['fact'] = "I'm sorry, I couldn't understand you."
                finalResponse['fact_type'] = 't'
                finalResponse['file_name'] = '-2.txt'
                try:
                    text = r.recognize_google(audio)  # use the Google recognizer to convert the audio to text
                    print("You said : {}".format(text))
                    if text != 'stop':
                        finalResponse = getFinalResponse(text, dataset)
                        fileName = finalResponse["file_name"]
                    else:
                        cont = False
                except Exception as e:
                    print(e)
                flashLightsProcess.terminate()
                stopLights()
                outputType = finalResponse["fact_type"]
                if cont:
                    if outputType == 'a':
                        playAudio(dataset.getFilePath(finalResponse['fact']))
                    elif outputType == 'r':
                        choice = random.randint(0, len(finalResponse['fact']) - 1)
                        speak(finalResponse, choice = choice)
                    else:
                        speak(finalResponse)
            
            time.sleep(1.0)
    GPIO.cleanup()
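# flashLights and stopLights drive the two processing LEDs but are not included
# in the excerpt; a hypothetical sketch of what they might look like with
# RPi.GPIO (the pin numbers and timing below are assumptions):
import time
import RPi.GPIO as GPIO

processing_led_1 = 23  # assumed pins; the excerpt does not show them
processing_led_2 = 24


def flashLights(interval=0.25):
    """Alternate the two processing LEDs until the process is terminated."""
    state = True
    while True:
        GPIO.output(processing_led_1, state)
        GPIO.output(processing_led_2, not state)
        state = not state
        time.sleep(interval)


def stopLights():
    GPIO.output(processing_led_1, False)
    GPIO.output(processing_led_2, False)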
Example #22
        packed_padded_out, hidden = self.myenc(packed_padded)
        seq = hidden.view(batch_size, WINDOWWIDTH, self.d_model)
        seq = seq.transpose(0, 1)
        seq = self.pos_enc1(seq * math.sqrt(self.d_model))
        trg = self.embed(trg)
        trg = trg.transpose(0, 1)
        trg = self.pos_enc2(trg * math.sqrt(self.d_model))
        output = self.trans(seq, trg, tgt_mask=trg_mask)
        return self.out(output.transpose(0, 1).contiguous())


if __name__ == '__main__':
    trg_mask = create_masks(WINDOWWIDTH // PREDICT_EVERY_NTH_FRAME -
                            1).to(device)
    print(trg_mask)
    testData = Dataset(loadPath='testDataset.pkl')
    model = Transformer(INP_DIM, NUMLABELS, DMODEL, LAYERS, HEADS, 0)
    model.to(device)
    (model_state, optimizer_state) = torch.load(LOAD_CHECKPOINT_PATH,
                                                map_location=device)
    model.load_state_dict(model_state)
    model.eval()
    translator = Translator(model, 1, WINDOWWIDTH // PREDICT_EVERY_NTH_FRAME)
    translator.to(device)
    translator.eval()
    outputs = []
    for i in range(0, len(testData), BATCH_SIZE):
        xs, ys = getBatch(i, testData, aug=False, size=1)
        hi = ys.view(BATCH_SIZE,
                     WINDOWWIDTH // PREDICT_EVERY_NTH_FRAME)[:, :-1]
        mhats = model(xs, hi, trg_mask).argmax(dim=2)[0]
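# create_masks builds the decoder target mask but is not defined in this
# excerpt; a sketch assuming it is the standard causal (subsequent-position)
# mask used with nn.Transformer:
import torch


def create_masks(size):
    """Additive causal mask: -inf above the diagonal, 0.0 on and below it."""
    mask = torch.triu(torch.ones(size, size), diagonal=1)
    return mask.masked_fill(mask == 1, float('-inf'))


print(create_masks(3))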
Example #23
    def make_dataset(self, dataset_location):
        cxt = self.client.context()
        ds = Dataset(self.dv, cxt, dataset_location, reactor)
        return ds
Example #24
def main():
    # parse arguments
    parser = argparse.ArgumentParser(
        description='Pattern Recognition Final Project')
    parser.add_argument('-f',
                        '--filepath',
                        dest='filepath',
                        type=str,
                        required=True)
    parser.add_argument('-t', '--t', dest='t', type=int, required=True)
    parser.add_argument('-e',
                        '--e',
                        dest='error_type',
                        type=str,
                        required=False)
    args = parser.parse_args()

    if args.t < 1:
        print('-t Error:')
        print('The number of classifiers must be at least 1')
        return

    if args.error_type and args.error_type not in ('FN', 'FP', 'E', 'E+FP',
                                                   'E+FN'):
        print('Please specify the error type: FN or FP or E')
        print('FN: False Negative')
        print('FP: False Positive')
        print('E: Empirical Error')
        print('E+FP: Empirical Error w/ False Positive Error')
        print('E+FN: Empirical Error w/ False Negative Error')
        return

    if not args.error_type:
        args.error_type = 'E'
    """
	Load Dataset
	"""
    # training set
    trainset = Dataset('train')
    if not os.path.exists(args.filepath + '/trainset.pkl'):
        trainset.process_data(args.filepath, 'trainset')
        print('training file loaded')
        save_object(trainset, args.filepath + '/trainset.pkl')
        print('training set saved')

    else:
        with open(args.filepath + '/trainset.pkl', 'rb') as inputfile:
            trainset = pickle.load(inputfile)
        print('training set loaded')

    # testing set
    testset = Dataset('test')
    if not os.path.exists(args.filepath + '/testset.pkl'):
        testset.process_data(args.filepath, 'testset')
        print('testing file loaded')
        save_object(testset, args.filepath + '/testset.pkl')
        print('testing set saved')
    else:
        with open(args.filepath + '/testset.pkl', 'rb') as inputfile:
            testset = pickle.load(inputfile)
        print('testing set loaded')
    """
	Load feature into Classifier
	"""
    # initialize violajones classifier
    classifier = ViolaJones(1)
    if not os.path.exists('./classifier.pkl'):
        classifier.load_feature(trainset, trainset.samples, trainset.pos,
                                trainset.neg)
        print('feature loaded')
        save_object(classifier, './classifier.pkl')
        print('classifier saved')
    else:
        with open('./classifier.pkl', 'rb') as inputfile:
            classifier = pickle.load(inputfile)
        print('classifier loaded')
    """
	Train Classifier
	"""
    classifier.T = int(args.t)
    classifier.train(args.error_type)
    print('training completed')
    save_object(classifier, './classifier_' + str(args.t) + '.pkl')
    print('classifier saved')
    """
	Evaluate Classifier
	"""
    for t in range(int(args.t) - 1, -1, -1):
        TP, FN, FP, TN = classifier.evaluate(trainset, -1)
        TP, FN, FP, TN = classifier.evaluate(testset, -1)
        classifier.clfs.pop()
        classifier.alphas.pop()

    return
Example #25
bens = full[(full['target'] == 0)]
trmals, temals = splits(mals, train_size=.75)
trbens, tebens = splits(bens, train_size=.75)
del full
train_data = trmals.append(trbens)
val_data = temals.append(tebens)
test_data = pd.read_csv('test.csv')

train_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[.485, .456, .406], std=[.229, .224, .225])
])

test_transforms = transforms.Compose([transforms.ToTensor()])

train_set = Dataset('data/train/', train_data, transform=train_transforms)
val_set = Dataset('data/train/', val_data, transform=train_transforms)
test_set = Dataset('data/test/',
                   test_data,
                   transform=test_transforms,
                   train=False)

train_loader = DataLoader(train_set, batch_size=8, shuffle=True, num_workers=4)
test_loader = DataLoader(test_set, batch_size=8, shuffle=True, num_workers=4)
val_loader = DataLoader(val_set, batch_size=8, shuffle=True, num_workers=4)

model = CustomENet()
model.load_state_dict(torch.load('./models/trained/CustomENet.model'))
device = 'cuda' if torch.cuda.is_available() else 'cpu'

print('Using', device + '...')
Example #26
    def fit(self, X):
        self.dataset_ref = Dataset(X)
        self.input_slice()
        self.build()
Example #27
    num_negatives = args.num_neg
    learner = args.learner
    learning_rate = args.lr
    batch_size = args.batch_size
    epochs = args.epochs
    verbose = args.verbose

    topK = 10
    evaluation_threads = 1  #mp.cpu_count()
    print("MLP arguments: %s " % (args))
    model_out_file = 'Pretrain/%s_MLP_%s_%d.h5' % (args.dataset, args.layers,
                                                   time())

    # Loading data
    t1 = time()
    dataset = Dataset(args.path + args.dataset)

    print("Load data done [%.1f s]. " % (time() - t1))
    # Build model

    model = get_model(layers, reg_layers)

    if learner.lower() == "adagrad":
        model.compile(optimizer=Adagrad(lr=learning_rate),
                      loss='binary_crossentropy')
    elif learner.lower() == "rmsprop":
        model.compile(optimizer=RMSprop(lr=learning_rate),
                      loss='binary_crossentropy')
    elif learner.lower() == "adam":
        model.compile(optimizer=Adam(lr=learning_rate),
                      loss='binary_crossentropy')
Example #28
####################################
# References for IHOP atlas
####################################

tmp = OrderedDict([
    ('LES', {
        'ncfile': '/Users/romainroehrig/data/LES/IHOP/IHOP_LES_MESONH_RR.nc',
        'line': 'k'
    }),
])

references = []
for ref in tmp.keys():
    references.append(
        Dataset(name=ref,
                case='IHOP',
                subcase='REF',
                ncfile=tmp[ref]['ncfile'],
                line=tmp[ref]['line']))

####################################
# Configuration file for IHOP atlas
####################################

tmin = cdtime.comptime(2002, 6, 14, 12)
tmax = cdtime.comptime(2002, 6, 14, 19)

diagnostics = OrderedDict([
    ("2D_dyn", {
        'head':
        'Dynamics (2D)',
Example #29
def main_worker(device, args):

    global best_acc1

    with open('../Data/preprocessed.json') as json_file:
        datapoints = json.load(json_file)

    random.shuffle(datapoints)

    train_data = datapoints[:int(.8 * (len(datapoints)))]
    val_data = datapoints[len(train_data):int(.9 * (len(datapoints)))]
    test_data = datapoints[len(train_data) + len(val_data):]

    train_IDs, train_labels = generate(train_data)
    val_IDs, val_labels = generate(val_data)
    test_IDs, test_labels = generate(test_data)

    # Data loading code
    train_dataset = Dataset(train_IDs, train_labels)
    val_dataset = Dataset(val_IDs, val_labels)
    test_dataset = Dataset(test_IDs, test_labels)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True)

    # create model
    model = Test_Classifier()
    model = model.to(device)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().to(device)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Testing
    if args.evaluate:
        validate(test_loader, model, criterion, args, device)
        return

    # Training Loop
    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, device)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args, device)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        # Save the current model
        if (epoch + 1) % args.save_freq == 0 or epoch == args.epochs - 1:
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, is_best)
Example #30
from Dataset import Dataset
from WTA_Hasher import WTAHasher
from kNN_Classifier import kNNClassifier
import numpy as np
import matplotlib.pyplot as plt
import copy

ds_train_dir = "../datasets/pima_indians/data.csv"
results_dir = "../final_results/pima_indians/"
num_k_values = 10

ds_orig = Dataset(ds_train_dir, name='Original Data')
ds_whiten = Dataset(ds_train_dir, whiten=True, name='Whitened Data')

alcohol_datasets = [ds_orig, ds_whiten]

k_values = range(1, num_k_values * 2, 2)
color = ['red', 'blue', 'green', 'black']
labels = ['20%', '50%', '80%', '100%']
folds = ['2-fold', '5-fold', 'N-fold']

for ds in alcohol_datasets:
    train_data_all, test_data = ds.getRandomPercentOfData(0.8)

    # Accuracy when using 20%, 50%, 80% and 100% of the data.
    # Each subset will have
    train_accuracy = [[
        np.zeros(num_k_values),
        np.zeros(num_k_values),
        np.zeros(num_k_values)
    ], [