Example #1
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

from Dataset import dataset  # project-local dataset class


def train(cuda=False, resume=False, batch_size=10):
    cudaAvailable = torch.cuda.is_available()

    trainDataset = dataset("Data/train2.txt")
    valDataset = dataset("Data/val2.txt")
    trainDataLoader = DataLoader(dataset=trainDataset,
                                 batch_size=batch_size,
                                 drop_last=True)
    valDataLoader = DataLoader(dataset=valDataset,
                               batch_size=batch_size,
                               drop_last=True)

    # Resume from a checkpoint or build a fresh model (OurNet is defined
    # elsewhere in the project).
    if resume:
        model = torch.load('./work-dir/40.pth')
    else:
        model = OurNet(batch_size)
    if cudaAvailable:
        model = model.cuda()
        # device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
        # model.to(device)

    loss_fn = nn.MSELoss()
    # The original built an SGD optimizer here and then immediately shadowed
    # it with Adamax, so only Adamax was ever used.
    optimizer = torch.optim.Adamax(model.parameters(),
                                   lr=0.1,
                                   weight_decay=1e-4)
    epochs = 1

    train_loop(trainDataLoader, valDataLoader, model, loss_fn, optimizer,
               epochs, cudaAvailable)
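
The snippet calls a project-local train_loop that is not shown anywhere on this page. As rough orientation only, here is a minimal sketch of what such a loop could look like for this regression setup; the signature matches the call above, but everything inside the body is an assumption, not the project's actual code.

# Hypothetical sketch of the missing train_loop (not the project's code);
# reuses the torch import from the snippet above.
def train_loop(train_loader, val_loader, model, loss_fn, optimizer,
               epochs, cuda_available):
    for epoch in range(epochs):
        model.train()
        for inputs, targets in train_loader:
            if cuda_available:
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            loss = loss_fn(model(inputs), targets)
            loss.backward()
            optimizer.step()
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for inputs, targets in val_loader:
                if cuda_available:
                    inputs, targets = inputs.cuda(), targets.cuda()
                val_loss += loss_fn(model(inputs), targets).item()
        print(f"epoch {epoch}: val loss {val_loss / max(len(val_loader), 1):.4f}")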
Example #2
def main(args):

    print("Loading Training Data:", args.training)
    print("args.featureName:", args.features)
    print("args.PCA:", args.PCA)
    print("args.features:", args.features)
    print("args.imbalance:", args.imbalance)
    print("args.network:", args.network)
    print("args.LR:", args.LR)
    print("args.VLAD_k:", args.VLAD_k)
    print("args.max_epoch:", args.max_epoch)
    print("flush!", flush=True)

    from Dataset import dataset
    my_dataset = dataset()
    my_dataset.loadTrainingDataset(
        path_data=args.training,
        featureName=args.features,
        PCA=args.PCA,
        imbalance=args.imbalance,
        batch_size=args.batch_size,
        window_size_sec=args.WindowSize,
    )
    my_dataset.loadValidationDataset(
        path_data=args.validation,
        featureName=args.features,
        PCA=args.PCA,
        window_size_sec=args.WindowSize,
    )
    my_dataset.loadTestingDataset(
        path_data=args.testing,
        featureName=args.features,
        PCA=args.PCA,
        window_size_sec=args.WindowSize,
    )

    # define Network
    from Network import networkMinutes
    my_network = networkMinutes(my_dataset, args.network, VLAD_k=args.VLAD_k)

    # define Trainer
    from Trainer import Trainer
    my_trainer = Trainer(my_network, my_dataset)
    vals_train, vals_valid, vals_test, model = my_trainer.train(
        epochs=args.max_epoch, learning_rate=args.LR, tflog=args.tflog)
    if (".csv" in args.csv_file and args.jobid >= 0
            and ("BUTTA" not in args.tflog.upper())):
        print("saving results to csv file")
        import pandas as pd
        df = pd.read_csv(args.csv_file, index_col=0)
        # DataFrame.set_value was removed in pandas 1.0; .at is its replacement.
        df.at[args.jobid, "train_mAP"] = vals_train["mAP"]
        df.at[args.jobid, "train_Acc"] = vals_train["accuracy"]
        df.at[args.jobid, "valid_mAP"] = vals_valid["mAP"]
        df.at[args.jobid, "valid_Acc"] = vals_valid["accuracy"]
        df.at[args.jobid, "test_mAP"] = vals_test["mAP"]
        df.at[args.jobid, "test_Acc"] = vals_test["accuracy"]
        print(model)
        df.at[args.jobid, "model"] = model
        df.to_csv(args.csv_file, sep=',', encoding='utf-8')
Example #3
import numpy as np
from concurrent.futures import ThreadPoolExecutor, as_completed


def process_dataset_test(data_dir, base, num, pca_comp, topk, test):
    images = dataset(data_dir, num)
    print(f'ThreadPoolExecutor initiated for {images.N_img} images with size {images.size}')
    with ThreadPoolExecutor() as executor:
        # Submit one covariance job per image. Passing idx directly to
        # submit() (instead of wrapping the call in a lambda) is the
        # idiomatic form and avoids late-binding surprises.
        futures = []
        for idx in images.idxs:
            futures.append(executor.submit(run_step_covariance, idx, images, test))
            print(f"No.{idx} image is submitted")
        # Fold the per-image covariance contributions together as they finish.
        cov_combined = np.zeros([images.layer, images.layer])
        for future in as_completed(futures):
            cov_combined += future.result()
        pc_eigens, pc_vals = images.get_pc_eigens(cov_combined, pca_comp)
        print(np.log(pc_vals))
        # The second pass below (PCA projection plus exact heavy-hitter
        # counting) was disabled with a triple-quoted string in the source;
        # it is kept, lightly tidied, for reference.
        '''
        futures = []
        for idx in images.idxs:
            futures.append(executor.submit(run_step_pca, idx, pc_eigens, images, base, pca_comp))

        stream_concat = []
        vec = np.zeros(base**pca_comp)
        print('Preparing 1D frequency vector')
        for future in as_completed(futures):
            stream_1D = future.result()
            stream_concat = np.append(stream_concat, stream_1D)
            for item in stream_1D:
                vec[item] += 1
        print('non-zero in vec', np.sum(vec != 0))
        exact_HH = run_exact_count(stream_concat, topk)
        print('exact_HH', exact_HH[:10])
        return exact_HH, vec, stream_concat
        '''
    return None
Example #4
def process_dataset(data_dir, base, num, pca_comp, topk, test):
    images = dataset(data_dir, num)
    num = images.N_img
    print(f'Processing {num} [test: {test}] images with original size {images.size}')
    with ThreadPoolExecutor() as executor:
        # One job per image; run_step_multiple is defined elsewhere in the project.
        futures = [
            executor.submit(run_step_multiple, idx, images, test)
            for idx in range(num)
        ]
        mul_comb = np.zeros([images.layer, images.layer])
        for future in as_completed(futures):
            mul_comb += future.result()
        pc = run_step_pc(mul_comb, pca_comp)
    return images.data1D, pc, mul_comb
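
Both process_dataset_test above and process_dataset here follow the same map-reduce pattern with concurrent.futures: submit one job per item, then fold the results together as they complete. A self-contained illustration of that pattern using only the standard library (square is a stand-in for the per-image worker):

from concurrent.futures import ThreadPoolExecutor, as_completed

def square(x):  # stand-in for the per-image worker function
    return x * x

with ThreadPoolExecutor() as executor:
    futures = [executor.submit(square, i) for i in range(10)]
    total = 0
    for future in as_completed(futures):
        total += future.result()  # accumulate results as they finish

print(total)  # 285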
Example #5
"""
Created on Tue Jul 31 19:13:45 2018

@author: WangJianqiao
"""

from Dataset import dataset
from DML import Partial_Linear
from sklearn.linear_model import Lasso


n = 200
p = 250
K = 2
S = 10
estimator_Y = Lasso(alpha=0.01)
estimator_D = Lasso(alpha=0.01)
# no interaction
X, D, Y = dataset(n, p)
partial_linear = Partial_Linear(K, S, estimator_Y, estimator_D, mode='mean')
partial_linear.fit(X, D, Y)
theta_DML1, theta_DML2 = partial_linear.coef()
CI_DML1, CI_DML2 = partial_linear.confidence_interval(alpha=0.95)

# interaction
X, D, Y = dataset(n, p, interaction=[0, 1])
partial_linear = Partial_Linear(K, S, estimator_Y, estimator_D, mode='mean')
partial_linear.fit(X, D, Y, interaction=[0, 1])
theta_DML1, theta_DML2 = partial_linear.coef()
theta_CI_DML1, theta_CI_DML2 = partial_linear.confidence_interval(alpha=0.95, i=[0])
Example #6
File: main.py Project: hao860524/DL
from Dataset import dataset
from Network import Net
from Trainer import Trainer
import os
from torch.utils.data import DataLoader
import torchvision

ROOT_DIR = './problem2-CNN'
train_csv = '/train.csv'
test_csv = '/test.csv'

if __name__ == '__main__':
    # train set
    print('processing training set...')
    train_set = face_crop(ROOT_DIR + train_csv)  # face_crop is defined elsewhere in the project
    train_data = dataset(train_set)
    # test set
    print('processing test set...')
    test_set = face_crop(ROOT_DIR + test_csv)
    test_data = dataset(test_set)

    train_loader = DataLoader(dataset=train_data,
                              batch_size=10,
                              shuffle=True,
                              num_workers=4)

    test_loader = DataLoader(dataset=test_data,
                             batch_size=10,
                             shuffle=False,
                             num_workers=4)
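
    The script stops after building the loaders. A quick sanity check, assuming each sample from dataset is an (image, label) pair, would be:

    # Pull one batch to verify shapes (assumes (image, label) samples).
    images, labels = next(iter(train_loader))
    print(images.shape, labels.shape)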
Example #7
from Dataset import dataset, files

# A false star was appended to the file list as a quick fix for a running
# error: ['star3_1.21.txt', 'star4_1.34.txt', 'star5_false.txt']
filename = files(4, 5)

observations = [dataset(filename[i]) for i in range(len(filename))]

# Recommended intervals: star 3 -> 4075 to 4175, star 4 -> 2500 to 2600.
for i in range(len(observations)):
    observations[i](3)
Example #8
import time

import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

from Dataset import dataset  # data_type and cvresults_path are assumed to be defined elsewhere


# The snippet begins mid-decorator; the wrapper definition below is a
# reconstruction (the decorator's real name is not in the source, so
# fn_timer is hypothetical).
def fn_timer(function):
    def function_timer(*args, **kwargs):
        t0 = time.time()
        results = function(*args, **kwargs)
        t1 = time.time()
        print('Total running time: %s minutes' % str((t1 - t0) / 60))
        return results

    return function_timer


def preview(train_label, test_label):
    print('train stat', pd.value_counts(train_label))
    print('test stat', pd.value_counts(test_label))


if __name__ == '__main__':
    train_fs, test_fs, train_labels, test_labels = dataset(data_type)
    print('Load %s' % data_type)
    # preview(train_labels, test_labels)
    knn = KNeighborsClassifier()
    p = np.array([2])
    k_range = np.array([1, 7])
    param_grid = [{'p': p, 'n_neighbors': k_range}]
    grid = GridSearchCV(knn, param_grid, cv=5, n_jobs=-1)
    t0 = time.time()
    grid.fit(train_fs, train_labels)
    t1 = time.time()
    print('Total running time: %s seconds' % str(t1 - t0))
    score = grid.score(test_fs, test_labels)
    print('Score: %.3f' % score)
    df = pd.DataFrame(grid.cv_results_)
    df.to_csv(cvresults_path + '%.3f.csv' % score)
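
With the wrapper reconstructed under the hypothetical name fn_timer, it would be applied like any other decorator:

@fn_timer
def slow_sum(n):
    # Toy workload to demonstrate the timing wrapper.
    return sum(range(n))

slow_sum(10_000_000)  # prints the elapsed time in minutes, then returns the sum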
Example #9
import os
import time

import numpy as np
import tensorflow as tf
from tqdm import tqdm


def main(args):

    # args.model = "Model/BestModel_Augm_60.ckpt"

    print("Loading Testing Data:", args.testing)
    print("args.PCA:", args.PCA)
    print("args.features:", args.features)
    print("args.network:", args.network)
    print("args.VLAD_k:", args.VLAD_k)
    print("args.model:", args.model)
    print("flush!", flush=True)

    from Dataset import dataset
    my_dataset = dataset()
    my_dataset.loadTestingDataset(path_data=args.testing,
                                  featureName=args.features,
                                  PCA=args.PCA,
                                  window_size_sec=args.WindowSize)

    # define Network
    from Network import networkMinutes
    my_network = networkMinutes(my_dataset, args.network, VLAD_k=args.VLAD_k)

    with tf.Session() as sess:
        print("global_variables_initializer")
        sess.run(tf.global_variables_initializer())

        print("restore session")
        saver = tf.train.Saver().restore(sess, args.model)

        sess.run(tf.local_variables_initializer())
        sess.run([my_network.reset_metrics_op])

        start_time = time.time()
        total_num_batches = 0
        for i in tqdm(range(my_dataset.nb_batch_testing)):

            batch_features, batch_labels, key = my_dataset.getTestingBatch(i)

            feed_dict = {
                my_network.input: batch_features,
                my_network.labels: batch_labels,
                my_network.keep_prob: 1.0,
                my_network.weights: my_dataset.weights
            }
            sess.run([my_network.loss], feed_dict=feed_dict)  # compute loss
            sess.run(my_network.update_metrics_op,
                     feed_dict=feed_dict)  # update metrics
            vals_test = sess.run(my_network.metrics_op,
                                 feed_dict=feed_dict)  # return metrics
            predictions = sess.run(my_network.predictions,
                                   feed_dict=feed_dict)  # return predictions
            # Per-sample mean absolute error, computed for inspection only.
            mean_error = np.mean(np.abs(predictions - batch_labels), axis=0)
            predictions_name = os.path.join(
                os.path.split(key)[0],
                args.output + "_" + os.path.split(key)[1])
            print(predictions_name)
            np.save(predictions_name, predictions)

            total_num_batches += 1

            vals_test["mAP"] = np.mean([
                vals_test["auc_PR_1"], vals_test["auc_PR_2"],
                vals_test["auc_PR_3"]
            ])

        # Multiplying by the identity zeroes everything except the diagonal,
        # i.e. the correctly classified count per class.
        good_sample = np.sum(np.multiply(vals_test["confusion_matrix"],
                                         np.identity(4)),
                             axis=0)
        bad_sample = np.sum(
            vals_test["confusion_matrix"] -
            np.multiply(vals_test["confusion_matrix"], np.identity(4)),
            axis=0)
        vals_test["accuracies"] = good_sample / (bad_sample + good_sample)
        vals_test["accuracy"] = np.mean(vals_test["accuracies"])

        print(vals_test["confusion_matrix"])
        print((
            'auc: %.3f   (auc_PR_0: %.3f auc_PR_1: %.3f auc_PR_2: %.3f auc_PR_3: %.3f)'
        ) % (vals_test["auc_PR"], vals_test["auc_PR_0"], vals_test["auc_PR_1"],
             vals_test["auc_PR_2"], vals_test["auc_PR_3"]))
        print(' Loss: {:<8.3} Accuracy: {:<5.3} mAP: {:<5.3}'.format(
            vals_test['loss'], vals_test["accuracy"], vals_test['mAP']))
        print(' Time: {:<8.3} s'.format(time.time() - start_time))

        return vals_test
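
The accuracy block above multiplies the confusion matrix by the identity just to extract its diagonal; np.diag does the same thing directly. A standalone illustration with made-up counts:

import numpy as np

cm = np.array([[50,  2,  1,  0],
               [ 3, 40,  4,  1],
               [ 0,  5, 35,  2],
               [ 1,  0,  3, 30]])
correct = np.diag(cm)                 # correctly classified counts per class
per_class = correct / cm.sum(axis=0)  # same column-sum denominator the snippet uses
print(per_class, per_class.mean())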
Example #10
File: 4.py Project: helland/AST1100
from Dataset import dataset, files

# Check the first star in the interval; change the lower bound to see a
# different star, or wrap this in a for loop.
filename = files(0, 5)
observations = dataset(filename[4])
# observations.reduceData()  # if you want to look at a subset of the data
observations(4)
earthMass = 5.972 * 10**24    # kg
jupiterMass = 1.898 * 10**27  # kg

print("The model gives us \n v =", observations.modelV_rel, "m/s")
print(" P =", observations.modelP, "days")
print("Planet mass =", observations.planetMass, "kg")
print("Planet mass =", observations.planetMass / jupiterMass, "Jupiter masses")
Example #11
# Calculate planet orbits.
for i in range(numberOfPlanets):
    planets[i](T, N)

# Build the array required to create the XML file; only planets 0, 3 and 5
# are stored. Note: the last branch is assumed to be planet 5, matching the
# comment and the mass list below; the source checked p_no == 6 here.
for p_no in range(numberOfPlanets):
    for t_i in range(N):
        if p_no == 0:
            pos_computed[0, 0, t_i] = planets[p_no].planetHistory[t_i][2]
            pos_computed[1, 0, t_i] = planets[p_no].planetHistory[t_i][3]
        if p_no == 3:
            pos_computed[0, 1, t_i] = planets[p_no].planetHistory[t_i][2]
            pos_computed[1, 1, t_i] = planets[p_no].planetHistory[t_i][3]
        if p_no == 5:
            pos_computed[0, 2, t_i] = planets[p_no].planetHistory[t_i][2]
            pos_computed[1, 2, t_i] = planets[p_no].planetHistory[t_i][3]

planetMasses = [planetsMass[0], planetsMass[3], planetsMass[5]]
star = starSystem(starMass)
star(pos_computed, planetMasses, 3, T, N)

velocityData = []
for i in range(len(star.starHistory)):
    velocityData.append(star.starHistory[i][1] * 4743.72)  # convert to m/s (the factor matches AU/yr -> m/s)

# Process data with an already calculated time series, radial velocity,
# given peculiar velocity and star mass.
processData = dataset(velocityData, times, -4213.37, starMass)
processData(2)  # 0, 1 or 2 depending on what you want to do with the data