Example #1
import itertools

import numpy as np
from sklearn.model_selection import KFold  # modern API: n_splits, not n_folds

# `train`, `classify`, and the helper module `f` are defined elsewhere
# in this project.
def genLearning(data, cls):
    classes = list(set(cls))
    predicted = []

    # Shuffle data and labels together with one random permutation.
    p = np.random.permutation(len(data))
    data = data[p]
    cls = cls[p]

    # 10-fold cross-validation; folds stay in order, so the chained
    # per-fold predictions line up with the shuffled labels.
    kf = KFold(n_splits=10, shuffle=False)
    for trainIndex, testIndex in kf.split(data):
        # Fit the generative model: per-class mean Mu and covariance S.
        Mu, S = train(data[trainIndex], cls[trainIndex])
        predicted.append(classify(data[testIndex], cls[testIndex], classes, Mu, S))

    predicted = list(itertools.chain(*predicted))
    f.evaluate(predicted, cls, classes)
    return (predicted, cls)
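As a hedged usage sketch: genLearning needs data and cls to be NumPy arrays (the fancy indexing data[p] requires that). The dataset choice below is purely illustrative, not from the original snippet:

# Hypothetical invocation; load_iris is an assumption for illustration.
from sklearn.datasets import load_iris

iris = load_iris()
predicted, shuffled_labels = genLearning(iris.data, iris.target)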
Example #2
def gen_fit(pop, goal):
    # `f` is a local helpers module; f.evaluate scores one individual.
    # Fitness = distance from the target value; lower is better.
    evaluations = [f.evaluate(x) for x in pop]
    fits = [abs(goal - e) for e in evaluations]

    # Sort on fitness alone, so individuals that are not mutually
    # comparable never break the sort on ties.
    fit_pop = sorted(zip(fits, pop), key=lambda pair: pair[0])
    ordered_pop = [individual for _, individual in fit_pop]

    # Ranked population plus the two best fitness values.
    return ordered_pop, fit_pop[0][0], fit_pop[1][0]
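A minimal usage sketch, assuming f.evaluate maps one individual to a single number (the population and goal below are made-up values for illustration):

# All names and values here are illustrative assumptions.
population = [1.0, 40.0, 50.0, 42.5]
ranked, best_fit, runner_up_fit = gen_fit(population, goal=42.0)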
Example #3
import copy

import numpy as np

def find_min(old_features,
             function,
             life_time,
             feature_range,
             num_of_children,
             speed_parameter,
             sensivity,
             smart_mutation=False):
    new_features = copy.deepcopy(old_features)
    print(new_features)
    init_results = np.average(f.evaluate(old_features, function))
    old_generation_results = init_results
    print(f.evaluate(old_features, function))
    print('init results: ', init_results)
    history = []

    find_max = False
    for year in range(life_time):
        if smart_mutation:
            new_features, old_features = f.run_genetic_algorithm_with_smart_mutation(
                new_features,
                old_generation_results,
                function,
                feature_range,
                num_of_children,
                sensivity=sensivity,
                find_max=find_max)
        else:
            old_features = f.run_genetic_algorithm(old_features,
                                                   function,
                                                   feature_range,
                                                   num_of_children,
                                                   speed_parameter,
                                                   year,
                                                   sensivity=sensivity,
                                                   find_max=find_max)
        old_generation_results = f.evaluate(old_features, function)
        history.append(np.average(old_generation_results))

    print('done!')
    print('final result', np.average(old_generation_results))
    f.plot(history)
    # With smart_mutation=False only old_features is evolved, so return
    # that population; new_features would still be the untouched copy.
    return new_features if smart_mutation else old_features
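A hypothetical call, assuming f expects a population array of candidate feature vectors; every argument value below is an illustrative assumption, since f's exact interface is not shown:

# Minimize a 2-D sphere function; all numbers are made up for illustration.
population = np.random.uniform(-5, 5, size=(20, 2))
best = find_min(population,
                function=lambda v: np.sum(v ** 2),
                life_time=100,
                feature_range=(-5, 5),
                num_of_children=10,
                speed_parameter=0.1,
                sensivity=0.01)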
Example #4
def find_max(features, function, life_time, feature_range, num_of_children,
             speed_parameter):
    init_results = np.average(f.evaluate(features, function))
    print(f.evaluate(features, function))
    print('init results: ', init_results)
    history = []

    maximize = True  # avoid shadowing this function's own name
    for year in range(life_time):
        # Pass find_max by keyword: passed positionally it would land in
        # the sensivity slot of f.run_genetic_algorithm (cf. find_min).
        features = f.run_genetic_algorithm(features, function, feature_range,
                                           num_of_children, speed_parameter,
                                           year, find_max=maximize)
        old_generation_results = f.evaluate(features, function)
        history.append(np.average(old_generation_results))

    print('done!')
    print('final result', np.average(old_generation_results))

    f.plot(history)
Example #5
def main():
    # for reproducibility
    random_seed = 2020
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    args = parse_args()

    # model loading
    model = get_pose_net(config, is_train=True)
    # model = model.half()
    model = model.to("cuda" if torch.cuda.is_available() else "cpu")

    valid_dataset = coco(
        config,
        config.DATASET.ROOT,
        config.DATASET.TEST_SET,
        is_train=False,
        is_eval=True,
        transform=tfms.Compose([
            tfms.ToTensor(),
        ])
    )
    
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE,
        shuffle=False,
        # num_workers=config.WORKERS,
        # pin_memory=True,
        drop_last=False,
    )

    if config.MODEL.CHECKPOINT is not None:
        info = load_checkpoint(config.MODEL.CHECKPOINT)
        if info is not None:
            _, model_dic, _, _ = info
            
            try:
                model.load_state_dict(model_dic)
                logging.info('Model Loaded.\n')
            except RuntimeError as e:
                # A state-dict shape mismatch is a runtime problem, not a
                # missing file, so re-raise accordingly.
                raise RuntimeError('Model shape is different. Please check the checkpoint.') from e

    end = time.time()
    logging.info('Evaluation Ready\n')

    result = evaluate(config, model, valid_loader)

    # Create the output directory before writing into it.
    os.makedirs(config.result_dir, exist_ok=True)
    with open(f'{config.result_dir}/data.json', 'w') as f:
        json.dump(result, f)
    logging.info(f"Taken {time.time()-end:.5f}s\n")

    logging.info("From the pose estimator alone.\n")
    valid_dataset.keypoint_eval('/home/mah/workspace/PoseFix/data/input_pose_path/keypoints_valid2017_results.json', config.result_dir + '/ori/')
    
    logging.info(f"Pose Estimator with PoseFix.\n")
    valid_dataset.keypoint_eval(result, config.result_dir + '/pred')
Example #6
from functions import load_data, train, evaluate, plot_result
from keras.models import Sequential
from keras.layers import Dense, Dropout, LeakyReLU

(x_train, y_train), (x_test, y_test) = load_data()

model = Sequential()
model.add(Dense(784, activation='relu', input_shape=x_train.shape[1:]))
model.add(Dense(128))
model.add(LeakyReLU(alpha=0.01))
model.add(Dense(64))
model.add(LeakyReLU(alpha=0.01))
model.add(Dropout(rate=0.25))
model.add(Dense(128))
model.add(LeakyReLU(alpha=0.01))
model.add(Dense(64))
model.add(LeakyReLU(alpha=0.01))
model.add(Dense(10, activation='softmax'))

model.compile(
    loss='categorical_crossentropy',
    optimizer='sgd', 
    metrics=['accuracy']
)

history = train(model, x_train, y_train)
plot_result(history)
evaluate(model, x_test, y_test)
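The load_data, train, plot_result, and evaluate helpers live in the local functions module and are not shown. A minimal sketch of what train might do, assuming the standard Keras fit API (the epoch count, batch size, and validation split are illustrative):

def train(model, x_train, y_train, epochs=10, batch_size=128):
    # Assumed implementation: plain Keras fit with a validation split.
    return model.fit(x_train, y_train,
                     epochs=epochs,
                     batch_size=batch_size,
                     validation_split=0.1)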
Example #7
def main():
    # for reproducibility
    random_seed = 2020
    random.seed(random_seed)
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.cuda.manual_seed(random_seed)
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    args = parse_args()
    reset_config(config, args)

    # model loading
    model = get_pose_net(config, is_train=True)
    # model = model.half()
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    logging.info(f"Training on CUDA: {torch.cuda.is_available()}")
    model = model.to("cuda" if torch.cuda.is_available() else "cpu")

    # Data loading process
    train_dataset = coco(
        config,
        config.DATASET.ROOT,
        config.DATASET.TRAIN_SET,
        is_train=True,
        is_eval=False,
        transform=tfms.RandomErasing(p=0.8, scale=(0.5, 0.5)),
    )

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE,
        shuffle=True,
        drop_last=True)

    valid_dataset = coco(config,
                         config.DATASET.ROOT,
                         config.DATASET.TEST_SET,
                         is_train=False,
                         is_eval=True,
                         transform=None)

    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.TEST.BATCH_SIZE,
        shuffle=False,
        drop_last=False)

    start_epoch = config.TRAIN.BEGIN_EPOCH
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=5e-4,
                                 weight_decay=1e-5,
                                 eps=1e-5)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[90, 120],
                                                     gamma=0.1)
    losschecker = BestLossChecker()

    if config.MODEL.CHECKPOINT is not None:
        info = load_checkpoint(config.MODEL.CHECKPOINT)
        if info is not None:
            start_epoch, model_dic, optim_dic, sched_dic = info

            try:
                model.load_state_dict(model_dic)
                logging.info('Model Loaded.')

                optimizer.load_state_dict(optim_dic)
                logging.info('Optimizer Loaded.')

                if sched_dic is not None:
                    scheduler.load_state_dict(sched_dic)
                else:
                    scheduler.last_epoch = start_epoch
                # The scheduler shares the optimizer object, whose state
                # was already loaded above, so no second load is needed.
                logging.info('Scheduler Loaded.')
                logging.info('All Weights Loaded...\n')
            except Exception as e:
                start_epoch = config.TRAIN.BEGIN_EPOCH
                logging.info(f'Model shape is different ({e}). Please check.')
                logging.info('Starting with initial weights...\n')

    end = time.time()
    logging.info('Training Ready\n')

    for epoch in range(start_epoch, config.TRAIN.END_EPOCH):
        if epoch == 10:
            config.TEST.FLIP_TEST = True

        if epoch % 5 == 0 and epoch != 0:
            result = evaluate(config, model, valid_loader)

            if epoch % 100 == 0:  # epoch != 0 already holds here
                # Create the directory before writing into it.
                os.makedirs(config.result_dir, exist_ok=True)
                with open(f'{config.result_dir}/data.json', 'w') as f:
                    json.dump(result, f)

            valid_dataset.keypoint_eval(result, config.result_dir + '/pred')
            valid_dataset.keypoint_eval(
                './data/input_pose_path/keypoints_valid2017_results.json',
                config.result_dir + '/ori/')
            end = time.time()

        losses = train(config,
                       epoch=epoch,
                       loader=train_loader,
                       model=model,
                       optimizer=optimizer)
        total_loss, hm_loss, coord_loss = losses
        is_best = losschecker.update(epoch, total_loss, hm_loss, coord_loss)

        # DataParallel wraps the model in .module; unwrap it if present.
        try:
            state_dict = model.module.state_dict()
        except AttributeError:
            state_dict = model.state_dict()

        save_checkpoint(
            {
                'epoch': epoch,
                'model': get_model_name(config),
                'state_dict': state_dict,
                'optimizer': optimizer.state_dict(),
            }, is_best, "./weights_2")

        scheduler.step()
        spent = time.time() - end
        hours = int(spent // 3600)
        minutes = int((spent - hours * 3600) // 60)  # avoid shadowing built-in min
        seconds = spent - hours * 3600 - minutes * 60

        logging.info(
            f"Epoch {epoch} took {hours:d}h {minutes:2d}m {seconds:2.3f}s\n")
        end = time.time()
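load_checkpoint itself is not shown. Given how its return value is unpacked above and the keys save_checkpoint writes ('epoch', 'state_dict', 'optimizer'), a sketch consistent with this usage would be the following; the torch.load details and the 'scheduler' key are assumptions:

import os
import torch

def load_checkpoint(path):
    # Returns (epoch, model state, optimizer state, scheduler state or
    # None), or None when the file is missing, matching the caller above.
    if not os.path.isfile(path):
        return None
    ckpt = torch.load(path, map_location='cpu')
    return (ckpt['epoch'], ckpt['state_dict'],
            ckpt['optimizer'], ckpt.get('scheduler'))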
Example #8
def render(self):
    """Reuse the evaluate function: render the current policy
    for one episode when seed=0."""
    evaluate(self.policy, 1, render=True, env_name=self.env_name)
Example #9
def evaluate(self):
    """Use the evaluate function you wrote to assess the current policy.
    Return the mean episode reward over 1000 episodes when seed=0."""
    result = evaluate(self.policy, 1000, env_name=self.env_name)
    return result
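Both render (Example #8) and this method delegate to a standalone evaluate helper that is not shown. A minimal sketch consistent with the call signature evaluate(policy, num_episodes, render=..., env_name=...), assuming the classic (pre-0.26) Gym API and a policy callable that maps an observation to an action; the default env_name is also an assumption:

import gym
import numpy as np

def evaluate(policy, num_episodes, render=False, env_name='CartPole-v0'):
    """Roll out `policy` for `num_episodes` with seed 0; return mean reward."""
    env = gym.make(env_name)
    env.seed(0)  # classic Gym seeding API
    rewards = []
    for _ in range(num_episodes):
        obs, done, total = env.reset(), False, 0.0
        while not done:
            if render:
                env.render()
            obs, reward, done, _ = env.step(policy(obs))
            total += reward
        rewards.append(total)
    env.close()
    return float(np.mean(rewards))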
Example #10
import functions
import numpy as np
import tensorflow as tf  # TensorFlow 1.x API (use tf.compat.v1 under TF2)

with tf.Session() as sess:
    x_train, y_train, x_valid, y_valid, x_test, y_test = functions.dataset_load()
    # x_test = functions.dataset_grayscale(x_test)

    # Restore the trained LeNet graph and look up its tensors,
    # presumably the ones functions.evaluate runs through the session.
    restorer = tf.train.import_meta_graph('lenet.meta')
    restorer.restore(sess, './lenet')
    accuracy_operation = sess.graph.get_tensor_by_name('accuracy_op:0')
    x = sess.graph.get_tensor_by_name('placeholder_x:0')
    y = sess.graph.get_tensor_by_name('placeholder_y:0')

    test_accuracy = functions.evaluate(x_test, y_test)
    print("Test Accuracy = {:.3f}".format(test_accuracy))
    print("Max memory use",
          sess.run(tf.contrib.memory_stats.MaxBytesInUse()) // 1024, "Kbytes")
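functions.evaluate is not shown; in this kind of TF1 LeNet setup it typically batches the data through the restored accuracy op via the default session. A sketch under that assumption (the batch size and tensor lookups are illustrative):

def evaluate(x_data, y_data, batch_size=128):
    # Assumes the LeNet graph has already been restored into the
    # default session, as in the script above.
    sess = tf.get_default_session()
    accuracy_op = sess.graph.get_tensor_by_name('accuracy_op:0')
    x = sess.graph.get_tensor_by_name('placeholder_x:0')
    y = sess.graph.get_tensor_by_name('placeholder_y:0')
    total = 0.0
    for offset in range(0, len(x_data), batch_size):
        bx = x_data[offset:offset + batch_size]
        by = y_data[offset:offset + batch_size]
        acc = sess.run(accuracy_op, feed_dict={x: bx, y: by})
        total += acc * len(bx)  # weight each batch by its size
    return total / len(x_data)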
Example #11
# hyparams_rf is assumed to be an iterable of (max_features, max_depth,
# min_samples_leaf) tuples defined earlier in the script.
for hp in hyparams_rf:
    rf_classifier = RandomForestClassifier(n_estimators=100,
                                           criterion="entropy",
                                           min_samples_leaf=hp[2],
                                           max_features=hp[0],
                                           oob_score=True,
                                           n_jobs=-1,
                                           max_depth=hp[1])
    rf_classifier = rf_classifier.fit(X_train_frame, Y_train_frame)

    #-------------------------training results---------------------------------------
    #frame-wise accuracy score
    train_frame_wise = rf_classifier.score(X_train_frame, Y_train_frame)
    #call-wise accuracy score
    train_call_wise = f.evaluate(rf_classifier, X_train_chirp_unaltered,
                                 Y_train_chirp)

    f_id = '/Users/gciniwe/Desktop/Final_Results/mfcc_results/rf_train_results_' + str(
        hp[0]) + "_" + str(hp[1]) + '.txt'
    with open(f_id, 'wb') as r:
        d = np.array(
            [rf_classifier.oob_score_, train_frame_wise, train_call_wise[0]])
        np.savetxt(r, d, fmt='%f')  # write to the open handle, not an undefined name

    # renamed from `id` to avoid shadowing the built-in
    freq_id = '/Users/gciniwe/Desktop/Final_Results/mfcc_results/rf_train_freq_table_' + str(
        hp[0]) + '_' + str(hp[1]) + '.txt'
    with open(freq_id, 'w') as outfile:
        outfile.write(str(train_call_wise[1]))

    #------------------------validation results-------------------------------------
    #frame-wise accuracy score