Example 1
def run_reg(self):
    # Run the regression suite against the configured assimp binaries,
    # then re-enable the update button in the UI once the run finishes.
    print("run_reg")
    run.setEnvVar("assimp_path", self.assimp_bin_path)
    run.run_test()
    rc.run()
    self.b_update_.config(state=ACTIVE)
    return 0
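
The run helper module this snippet calls into is not shown anywhere in the listing. A minimal sketch of one plausible shape for it, assuming setEnvVar stages environment variables for a child process and run_test shells out to a regression runner script; the module layout and the regression_runner.py name are illustrative assumptions, not part of the original:

# run.py -- hypothetical sketch of the helper module used above
import os
import subprocess

_staged_env = {}

def setEnvVar(key, value):
    # Stage an environment variable for the test subprocess.
    _staged_env[key] = value

def run_test():
    # Launch the regression runner with the staged environment merged
    # into the current one. "regression_runner.py" is an assumed name.
    env = dict(os.environ, **_staged_env)
    subprocess.run(["python", "regression_runner.py"], env=env, check=True)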
def run_sr():
    blocks = [
        (
            range(20, 100, 10),
            range(0, MAX_ERRORS + 1),
        ),
        (
            range(20, 400, 20),
            range(0, MAX_ERRORS + 1),
        ),
        (
            range(20, 800, 30),
            range(0, MAX_ERRORS + 1),
        ),
        (
            range(20, 1010, 40),
            range(0, MAX_ERRORS + 1),
        ),
        (
            range(20, 1010, 10),
            range(0, MAX_ERRORS + 1),
        ),
    ]

    for dell, errl in blocks:
        run.run_test(
            test_url='http://www.quantified-mind.com/tests/simple_reaction_time/practice',
            js_script_template=js_scripts.SIMPLE_REACTION_SCRIPT,
            delays=list(dell),
            errors=list(errl),
            state_file='simple-reaction.json',
        )
def run_vm():
    blocks = [
        (
            # ok, so my delay was 742 on average... so let's do from 20 to 1000 in steps of 20?
            range(20, 1200, 20),
            range(0, MAX_ERRORS + 1),
        ),
        (
            range(30, 1200, 20),
            range(0, MAX_ERRORS + 1),
        ),
    ]
    for dell, errl in blocks:
        run.run_test(
            test_url='http://www.quantified-mind.com/tests/feature_match/practice',
            js_script_template=js_scripts.VISUAL_MATCHING_SCRIPT,
            delays=list(dell),
            errors=list(errl),
            state_file='visual-matching.json',
        )
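
Each block above pairs a sweep of delay values with the full range of allowed error counts. Assuming run_test explores every delay/error combination (its internals are not shown here), the grid a single block produces can be previewed as below; the MAX_ERRORS value is an assumption:

from itertools import product

MAX_ERRORS = 3  # assumed value; the real constant is defined elsewhere

delays = list(range(20, 100, 10))        # first block of run_sr
errors = list(range(0, MAX_ERRORS + 1))

# One (delay, errors) pair per candidate test configuration.
grid = list(product(delays, errors))
print(len(grid))  # 8 delays x 4 error counts = 32 combinations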
Example 5
    def calculate_fitness(self):

        tree = self.genome
        tree_string = get_node_string(tree)
        #print("Tree was turned into: ", tree_string)
        #input("GO ON?!")
        with open("behavior_tree_bot/tree.txt", "w") as file:
            file.write(tree_string)
        results = run.run_test()
        #print("Results: ", results)
        #self._fitness = 10 + results['wins'] - 2 * results['crashes'] - results['timed_out']

        self._fitness = (10
                         + 0.5 * results['easy_bot']
                         + 2 * results['spread_bot']
                         + 2 * results['aggressive_bot']
                         + 0.8 * results['production_bot']
                         - 2 * results['crashes']
                         - results['timed_out']
                         + 0.5 * results['unique_wins'])
        self._results = results

        #measurements = metrics.metrics(self.to_level())
        # Print out the possible measurements or look at the implementation of metrics.py for other keys:
        # print(measurements.keys())
        # Default fitness function: Just some arbitrary combination of a few criteria.  Is it good?  Who knows?
        # STUDENT Modify this, and possibly add more metrics.  You can replace this with whatever code you like.

        #####################################################################
        ##                            TODO
        ##  We need to think of a way to calculate fitness. The naive solution is to
        ##  literally just run the game and check our win rate. Unfortunately this would
        ##  take a lot of time. Keep an eye out for patterns that might give us a better
        ##  way to analyze trees success without actually running it. In the meantime,
        ##  this should probably just create the behavior tree from our genome and
        ##  run the planet wars program.
        ##
        ##  -Damen
        #####################################################################
        """
        coefficients = dict(
            meaningfulJumpVariance=0.5,
            negativeSpace=0.6,
            pathPercentage=0.5,
            emptyPercentage=0.6,
            linearity=-0.5,
            solvability=2.0
        )

        self._fitness = sum(map(lambda m: coefficients[m] * measurements[m],
                                coefficients))
        """
        return self
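
The docstring above preserves an earlier, metrics-based fitness function. Pulled out as a standalone sketch (assuming a measurements dict keyed by the same metric names, since metrics.metrics is not shown), the weighted sum it computes is simply:

def weighted_fitness(measurements):
    # Weighted combination of level metrics, mirroring the commented-out
    # coefficient table inside calculate_fitness above.
    coefficients = dict(
        meaningfulJumpVariance=0.5,
        negativeSpace=0.6,
        pathPercentage=0.5,
        emptyPercentage=0.6,
        linearity=-0.5,
        solvability=2.0,
    )
    return sum(coefficients[m] * measurements[m] for m in coefficients)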
Example 6
    # Bucket categorical variables that have 10 or more levels into 10 groups.
    shrink_cat_vars_dict = shrink_cat_vars(train, cat_vars, shrink_size=10)
    shrink_cat_train = shrink_cat(train_cat, shrink_cat_vars_dict)
    shrink_cat_test = shrink_cat(test_cat, shrink_cat_vars_dict)

    cat_features = [
        feature for feature in shrink_cat_train.columns if 'new' in feature
    ]
    print("engineered features: ", cat_features)

    # 2-4. merge
    train = pd.concat([train[features], shrink_cat_train[cat_features]],
                      axis=1).astype(float)
    test = pd.concat([test[features], shrink_cat_test[cat_features]],
                     axis=1).astype(float)
    print(train.shape, test.shape)

    # 3. train and prediction
    fold = 10
    seed = 2018
    models = run_train(train, train_label, params, fold=fold, seed=seed)

    # 4. test
    prediction = run_test(test, models, fold)

    # 5. save
    test_submission = pd.DataFrame({'id': test_id, 'target': prediction})
    test_submission.to_csv('../porto-seguro/final.csv', index=False)
    print("saved result")
Example 9
    time.sleep(5 * 60)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        infrastructure = sys.argv[1]
        config_path = sys.argv[2]
        scenario_path = sys.argv[3]
        num = sys.argv[4]

        results = []
        for i in range(int(num)):
            header = '###################\n' \
                     '# Running test %s #\n' \
                     '###################'

            print(header % (i + 1))

            results_path = run_test(sys.argv)
            print("Results for this test are in %s\n" % results_path)
            results.append(results_path)

            # On AWS, reboot the frontend instances between runs so each
            # test starts from a clean slate.
            if infrastructure == 'aws':
                reboot_aws_frontends(config_path)

        summary = '######################\n' \
                  '#  SUMMARY OF TESTS  #\n' \
                  '######################'
        print(summary)
        # Number the summary lines 1-based to match the per-run headers.
        for i, r in enumerate(results):
            print("Results for test %s are in: %s" % (i + 1, r))
Example 10
    def train_network(self, resume=False):
        csv_logger = CsvLogger(filepath=self.opt.save_model)
        if not resume:
            best_test = 0

            start_epoch = self.opt.start_epoch
        else:
            checkpoint_path = os.path.join(
                self.opt.resume,
                'checkpoint{}.pth.tar'.format(self.opt.local_rank))
            csv_path = os.path.join(
                self.opt.resume, 'results{}.csv'.format(self.opt.local_rank))
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path,
                                    map_location=self.opt.device)
            start_epoch = checkpoint['epoch']
            start_step = len(self.train_loader) * start_epoch
            self.optim, self.mixup = self.init_optimizer_and_mixup(
                checkpoint['optimizer'])
            best_test = checkpoint['best_prec1']
            self.model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                checkpoint_path, checkpoint['epoch']))

        for epoch in range(start_epoch, self.opt.epochs + 1):
            print("-------------------------", epoch,
                  "--------------------------------")

            train_loss, train_accuracy1, train_accuracy5 = run_train(
                self.model,
                self.train_loader,
                self.mixup,
                epoch,
                self.optim,
                self.criterion,
                self.device,
                self.opt._dtype,
                self.opt.train_batch_size,
                log_interval=2)
            test_loss, test_accuracy1, test_accuracy5 = run_test(
                self.model, self.val_loader, self.criterion, self.device,
                self.opt._dtype)

            self.optim.epoch_step()
            self.vis.plot_curves({'train_loss': train_loss},
                                 iters=epoch,
                                 title='train loss',
                                 xlabel='epoch',
                                 ylabel='train loss')
            self.vis.plot_curves({'train_acc': train_accuracy1},
                                 iters=epoch,
                                 title='train acc',
                                 xlabel='epoch',
                                 ylabel='train acc')
            self.vis.plot_curves({'val_loss': test_loss},
                                 iters=epoch,
                                 title='val loss',
                                 xlabel='epoch',
                                 ylabel='val loss')
            self.vis.plot_curves({'val_acc': test_accuracy1},
                                 iters=epoch,
                                 title='val acc',
                                 xlabel='epoch',
                                 ylabel='val acc')

            csv_logger.write({
                'epoch': epoch + 1,
                'val_error1': 1 - test_accuracy1,
                'val_error5': 1 - test_accuracy5,
                'val_loss': test_loss,
                'train_error1': 1 - train_accuracy1,
                'train_error5': 1 - train_accuracy5,
                'train_loss': train_loss
            })
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': self.model.state_dict(),
                    'best_prec1': best_test,
                    'optimizer': self.optim.state_dict()
                },
                test_accuracy1 > best_test,
                filepath=self.opt.save_model,
                local_rank=self.opt.local_rank)
            # TODO: save on the end of the cycle

            mem = '%.3gG' % (torch.cuda.memory_cached() /
                             1E9 if torch.cuda.is_available() else 0)
            print("GPU memory in use:", mem)
            if test_accuracy1 > best_test:
                best_test = test_accuracy1
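
save_checkpoint is one of several helpers this method depends on without showing. A minimal sketch of the usual pattern, assuming the second argument flags whether this checkpoint is the best seen so far; the model_best file name is an assumption, while the checkpoint name matches the one loaded in the resume branch above:

import os
import shutil

import torch

def save_checkpoint(state, is_best, filepath='.', local_rank=0):
    # Always persist the latest state; when it beats the best accuracy
    # so far, keep an extra copy under a stable "best" name.
    checkpoint = os.path.join(filepath,
                              'checkpoint{}.pth.tar'.format(local_rank))
    torch.save(state, checkpoint)
    if is_best:
        best = os.path.join(filepath,
                            'model_best{}.pth.tar'.format(local_rank))
        shutil.copyfile(checkpoint, best)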