Example #1
def main():

    bs = 32
    use_seq_len = 12
    seq_len = 30
    mode = 'val'  # --> !!!
    dataset_dir = '/media/Data/datasets/bair/softmotion30_44k/'
    ckpt_dir = os.path.join('/home/mandre/adr/trained_models/bair')

    frames, actions, states, steps, iterator = get_data(
        dataset='bair',
        mode=mode,
        batch_size=bs,
        shuffle=False,
        dataset_dir=dataset_dir,
        sequence_length_train=seq_len,
        sequence_length_test=seq_len)

    _, _, _, val_steps, val_iterator = get_data(dataset='bair',
                                                mode='val',
                                                batch_size=bs,
                                                shuffle=False,
                                                dataset_dir=dataset_dir,
                                                sequence_length_train=seq_len,
                                                sequence_length_test=seq_len)

    gpu_options = tf.GPUOptions(visible_device_list='1')
    config = tf.ConfigProto(gpu_options=gpu_options)

    evaluate_autoencoder_A(frames,
                           actions,
                           states,
                           use_seq_len=use_seq_len,
                           iterator=val_iterator,
                           ckpt_dir=ckpt_dir,
                           context_frames=2,
                           gaussian=True,
                           hc_dim=64,
                           ha_dim=8,
                           z_dim=6,
                           units=256,
                           lstm_units=256,
                           lstm_layers=1,
                           steps=steps,
                           val_steps=val_steps,
                           ec_filename='Ec_a_test.h5',
                           a_filename='A_a_test.h5',
                           d_filename='D_a_test.h5',
                           l_filename='L_a_test.h5',
                           set_states=False,
                           config=config,
                           evaluate=True,
                           predict=True,
                           random_window=True,
                           eval_kld=False)
Example #2
    def init_with_server(self):
        print('setting up connection with server')
        address = ('localhost', self.args.port)
        self.sock = setup_connection(address)

        print('syncing with server')
        data = {'action':'setup'}
        send_data(self.sock, data)
        message = self.wait_for_response(loud=False)

        assert message['action'] == 'setup'

        self.algorithm_names = message['algorithms']
        self.num_algorithms = len(self.algorithm_names)
        
        message['dataset_args']['train']['data_path'] = self.args.data_path
        message['dataset_args']['val']['data_path'] = self.args.data_path
        message['dataset_args']['test']['data_path'] = self.args.data_path

        parser = get_main_parser()
        args = get_raw_args(parser,
                            stdin_list=['--dataset_args',
                                        json.dumps(message['dataset_args'])],
                            args_dict={'dataset': message['dataset']})

        print(args)
        datasets = get_data(args)
        self.dataset = datasets['test']

        assert message['dataset_sig'] == self.dataset.get_signature(), 'The datasets files and dataset_args must match!'

        self.sampler = Sampler(self.dataset, np.random.randint(99999), self.args.num_targets)
Example #3
    def begin_contact(self, contact: b2Contact):
        data_a, data_b = get_data(contact)
        if data_a != self.car and data_b != self.car and \
                data_a not in self.car.tires and data_b not in self.car.tires and \
                isinstance(data_a, GameObject) and isinstance(data_b, GameObject) and \
                (not data_a.ignore_ray_casting or not data_b.ignore_ray_casting):
            self.collide = True
Example #4
def main():
    transforms = {}
    for line in utils.get_data(21):
        line = line.split('=>')
        pattern = []
        for row in line[0].strip().split('/'):
            pattern.append(tuple([x for x in row]))
        transform = []
        for row in line[1].strip().split('/'):
            transform.append([x for x in row])
        transforms[tuple(pattern)] = transform
    raw_pattern = ['.#.', '..#', '###']
    matrix = [[x for x in pattern] for pattern in raw_pattern]
    # print(np.array(matrix))
    # for pattern in get_flips(matrix):
    #     print(np.array(pattern))

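    # Apply 18 rounds of enhancement, then count the lit '#' cells.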
    for _ in range(18):
        matrix = enhance(matrix, transforms)
    c = 0
    for y in matrix:
        for x in y:
            if x == '#':
                c += 1
    print(c)
Example #5
def main():
    parser = get_parser_ens()
    args = parser.parse_args()
    args.method = os.path.basename(__file__).split('-')[1][:-3]
    if args.aug_test:
        args.method = args.method + '_augment'
    torch.backends.cudnn.benchmark = True

    compute = {
        'CIFAR10':
        ['VGG16BN', 'PreResNet110', 'PreResNet164', 'WideResNet28x10'],
        'CIFAR100':
        ['VGG16BN', 'PreResNet110', 'PreResNet164', 'WideResNet28x10'],
        'ImageNet': ['ResNet50']
    }

    for model in compute[args.dataset]:
        args.model = model
        logger = Logger(base='./logs/')
        print('-' * 5, 'Computing results of', model, 'on', args.dataset + '.',
              '-' * 5)

        loaders, num_classes = get_data(args)
        targets = get_targets(loaders['test'], args)
        args.num_classes = num_classes
        model = get_model(args)

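        # Each run ensembles per-sample predictive probabilities over the loaded
        # snapshots (the log-probs are averaged via logsumexp at the end).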
        for run in range(1, 6):
            log_probs = []

            fnames = read_models(args,
                                 base=os.path.expanduser(args.models_dir),
                                 run=run if args.dataset != 'ImageNet' else -1)
            fnames = sorted(fnames,
                            key=lambda a: int(a.split('-')[-1].split('.')[0]))

            for ns in range(min(
                    len(fnames), 100 if args.dataset != 'ImageNet' else 50)):
                start = time.time()
                model.load_state_dict(get_sd(fnames[ns], args))
                ones_log_prob = one_sample_pred(loaders['test'], model)
                log_probs.append(ones_log_prob)
                logger.add_metrics_ts(ns,
                                      log_probs,
                                      targets,
                                      args,
                                      time_=start)
                logger.save(args)

            os.makedirs('.megacache', exist_ok=True)
            logits_pth = '.megacache/logits_%s-%s-%s-%s-%s'
            logits_pth = logits_pth % (args.dataset, args.model, args.method,
                                       ns + 1, run)
            log_prob = logsumexp(np.dstack(log_probs), axis=2) - np.log(ns + 1)
            print('Save final logprobs to %s' % logits_pth, end='\n\n')
            np.save(logits_pth, log_prob)
Example #6
    def begin_contact(self, contact: b2Contact):
        if self.collide:
            return
        data_a, data_b = get_data(contact)
        if isinstance(data_a, Car) or isinstance(data_a, Tire):
            self.collide = True
            data_a.on_explosion(self, 0)
        elif isinstance(data_b, Car) or isinstance(data_b, Tire):
            self.collide = True
            data_b.on_explosion(self, 0)
Example #7
def main():
    data = list(utils.get_data(23))
    proc0 = Processor(0, data)
    p0 = proc0.execute()
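    # Step the program until it raises (end of instructions), then report counters.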
    try:
        while True:
            next(p0)
    except Exception as e:
        print(e)
        print(p0.mul_count)
        print(p0.iternum)
Example #8
    def end_contact(self, contact: b2Contact):
        super().end_contact(contact)
        data_a, data_b = get_data(contact)

        if type(data_a) == LootArea or type(data_b) == LootArea:
            if (data_a == self or data_b == self) and \
                    self.is_bank_loot and not self.looted:
                self.is_bank_loot = False
                self.bank_timeout_state = 0
                self.bank_timer = 0
                self.queue_lines.append('Вы вышли из зоны грабежа :(')  # "You left the looting zone :("
Example #9
def main():
    parser = get_parser_ens()
    args = parser.parse_args()
    args.method = os.path.basename(__file__).split('-')[1][:-3]
    torch.backends.cudnn.benchmark = True

    if args.aug_test:
        args.method = args.method + '_augment'

    print('Computing for all datasets!')

    compute = {
        'CIFAR10': ['VGG16BN', 'WideResNet28x10do'],
        'CIFAR100': ['VGG16BN', 'WideResNet28x10do']
    }

    for model in compute[args.dataset]:
        args.model = model
        logger = Logger()
        print('-' * 5, 'Computing results of', model, 'on', args.dataset + '.',
              '-' * 5)

        loaders, num_classes = get_data(args)
        targets = get_targets(loaders['test'], args)

        fnames = read_models(args, base=os.path.expanduser(args.models_dir))
        args.num_classes = num_classes
        model = get_model(args)

        for try_ in range(1, 6):
            fnames = np.random.permutation(fnames)
            model.load_state_dict(get_sd(fnames[0], args))

            log_probs = []
            for ns in range(100):
                start = time.time()
                ones_log_prob = one_sample_pred(loaders['test'], model)
                log_probs.append(ones_log_prob)
                logger.add_metrics_ts(ns,
                                      log_probs,
                                      targets,
                                      args,
                                      time_=start)
                logger.save(args)

            os.makedirs('./.megacache', exist_ok=True)
            logits_pth = '.megacache/logits_%s-%s-%s-%s-%s'
            logits_pth = logits_pth % (args.dataset, args.model, args.method,
                                       ns + 1, try_)
            log_prob = logsumexp(np.dstack(log_probs), axis=2) - np.log(ns + 1)
            print('Save final logprobs to %s' % logits_pth)
            np.save(logits_pth, log_prob)
            print('Used weights from %s' % fnames[0], end='\n\n')
Example #10
def run_experiment(build_model, args):
    x_train, y_train, x_valid, y_valid, x_test, y_test = get_data(args)

    output = open("../results/results.txt", "a")

    # First learning rate
    base_model = build_model()

    start = time.time()
    output.write("Start Training %s - %s \n" % (base_model.model_name, start))
    print('Train first learning rate')
    lr = learning_rates[0]
    weight_name = '../results/best_weights_%s_%s.hdf5' % (base_model.model_name, lr)
    model = base_model.train(x_train, y_train, x_valid, y_valid, epoch_size=100, lr=lr, weight_name=weight_name)

    print("Testing")
    model.load_weights(weight_name)
    x_pred = evaluator.predict(base_model, model, x_test)

    # Save predictions
    np.save("../results/predictions_%s_%s_%s.npy" % (base_model.model_name, args.d, lr), x_pred)

    test_result = evaluator.mean_roc_auc(x_pred, y_test)
    print("Mean ROC-AUC: %s" % test_result)
    output.write("%s -  Mean ROC-AUC: %s, %s \n" % (lr, test_result, time.time()))

    # For each learning rate
    for lr_index in range(1, len(learning_rates)):
        lr = learning_rates[lr_index]

        base_model = build_model()

        print('Train %s' % lr)
        weight_name = '../results/best_weights_%s_%s.hdf5' % (base_model.model_name, lr)
        model = base_model.retrain(x_train, y_train, x_valid, y_valid, epoch_size=100, lr=lr,
                                   lr_prev=learning_rates[lr_index - 1], weight_name=weight_name)

        print("Testing")
        model.load_weights(weight_name)
        x_pred = evaluator.predict(base_model, model, x_test)

        # Save predictions
        np.save("../results/predictions_%s_%s_%s.npy" % (base_model.model_name, args.d, lr), x_pred)

        test_result = evaluator.mean_roc_auc(x_pred, y_test)
        print("Mean ROC-AUC: %s" % test_result)
        output.write("%s -  Mean ROC-AUC: %s, %s \n" % (lr, test_result, time.time()))

    end = time.time()
    output.write("End Training %s - %s" % (base_model.model_name, end))

    output.close()
Example #11
    def begin_contact(self, contact: b2Contact):
        super().begin_contact(contact)
        data_a, data_b = get_data(contact)

        if type(data_a) == EnergyItem or type(data_b) == EnergyItem:
            if type(data_b) == EnergyItem:
                data_a, data_b = data_b, data_a
            if data_a.collect():
                self.on_energy_collect()
        elif type(data_a) == LootArea or type(data_b) == LootArea:
            if (data_a == self or data_b == self) and not self.is_bank_loot:
                self.is_bank_loot = True
                self.queue_lines.append('Начинаем грабить банк :)')  # "Starting to rob the bank :)"
Example #12
def main():

    bs = 32
    use_seq_len = 12
    seq_len = 30
    mode = 'train'  # --> !!!
    dataset_dir = '/media/Data/datasets/bair/softmotion30_44k/'
    ckpt_dir = os.path.join(
        '/home/mandre/adr/trained_models/bair/random_window')

    frames, actions, states, steps, _ = get_data(dataset='bair',
                                                 mode=mode,
                                                 batch_size=bs,
                                                 shuffle=False,
                                                 dataset_dir=dataset_dir,
                                                 sequence_length_train=seq_len,
                                                 sequence_length_test=seq_len)

    gpu_options = tf.GPUOptions(visible_device_list='0')
    config = tf.ConfigProto(gpu_options=gpu_options)

    evaluate_adr_vp(frames,
                    actions,
                    states,
                    ckpt_dir=ckpt_dir,
                    context_frames=2,
                    use_seq_len=use_seq_len,
                    gaussian_a=True,
                    hc_dim=128,
                    ha_dim=16,
                    ho_dim=32,
                    za_dim=10,
                    lstm_units=256,
                    lstm_a_units=256,
                    lstm_layers=2,
                    lstm_a_layers=1,
                    action_net_units=256,
                    steps=steps,
                    eo_load_name='Eo_t00103_v0011_tf10tv2.h5',
                    do_load_name='D_o_t00103_v0011_tf10tv2.h5',
                    l_load_name='L_t00103_v0011_tf10tv2.h5',
                    ec_load_name='Ec_a_t00243_v0023_tf10tv2.h5',
                    a_load_name='A_a_t00243_v0023_tf10tv2.h5',
                    da_load_name='D_a_t00243_v0023_tf10tv2.h5',
                    la_load_name='L_a_t00243_v0023_tf10tv2.h5',
                    config=config,
                    evaluate=True,
                    predict=False,
                    feedback_predictions=True,
                    random_window=True)
Example #13
def main():
    data = list(utils.get_data(18))
    proc0 = Processor(0, data)
    proc1 = Processor(1, data)
    proc0.set_other(proc1)
    proc1.set_other(proc0)
    p0 = proc0.execute()
    p1 = proc1.execute()
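    # Alternate both programs until one of them raises (deadlock or termination).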
    try:
        while True:
            next(p0)
            next(p1)
    except Exception as e:
        print(e)
        print(f'Part 2: {proc1.num_sent}')
Example #14
def main():

    epochs = 10000
    shuffle = True
    bs = 32
    seq_len = 30
    dataset_dir = '/media/Data/datasets/bair/softmotion30_44k/'
    save_path = os.path.join(os.path.expanduser('~/'),
                             'adr/trained_models/bair/')

    gpu_options = tf.GPUOptions(visible_device_list='1')
    config = tf.ConfigProto(gpu_options=gpu_options)

    frames, _, states, steps, train_iterator = get_data(
        dataset='bair',
        mode='train',
        batch_size=bs,
        shuffle=shuffle,
        dataset_dir=dataset_dir,
        sequence_length_train=seq_len,
        initializable=False)

    val_frames, _, val_states, val_steps, val_iterator = get_data(
        dataset='bair',
        mode='val',
        batch_size=bs,
        shuffle=False,
        dataset_dir=dataset_dir,
        sequence_length_test=seq_len)

    sess = tf.Session(config=config)
    K.set_session(sess)

    history = train_inference_model(frames, states, val_frames, val_states,
                                    epochs, steps, val_steps, save_path)
Example #15
def main():
    parser = get_parser_ens()
    args = parser.parse_args()
    args.method = os.path.basename(__file__).split('-')[1][:-3]
    torch.backends.cudnn.benchmark = True

    if args.aug_test:
        args.method = args.method + '_augment'

    os.makedirs('./logs', exist_ok=True)

    compute = {
        'CIFAR10':  ['BayesVGG16BN', 'BayesPreResNet110', 'BayesPreResNet164', 'BayesWideResNet28x10'],
        'CIFAR100': ['BayesVGG16BN', 'BayesPreResNet110', 'BayesPreResNet164', 'BayesWideResNet28x10'],
        'ImageNet': ['BayesResNet50']
    }

    for model in compute[args.dataset]:
        args.model = model
        logger = Logger()
        print('-'*5, 'Computing results of', model, 'on', args.dataset + '.', '-'*5)

        loaders, num_classes = get_data(args)
        targets = get_targets(loaders['test'], args)

        fnames = read_models(args, base=os.path.expanduser(args.models_dir))
        args.num_classes = num_classes
        model = get_model(args)

        for run in range(1, 6):
            print('Repeat num. %s' % run)
            log_probs = []

            checkpoint = get_sd(fnames[0], args)
            model.load_state_dict(checkpoint)

            for ns in range(100 if args.dataset != 'ImageNet' else 50):
                start = time.time()
                ones_log_prob = one_sample_pred(loaders['test'], model)
                log_probs.append(ones_log_prob)
                logger.add_metrics_ts(ns, log_probs, targets, args, time_=start)
                logger.save(args)

            os.makedirs('.megacache', exist_ok=True)
            logits_pth = '.megacache/logits_%s-%s-%s-%s-%s'
            logits_pth = logits_pth % (args.dataset, args.model, args.method, ns+1, run)
            log_prob = logsumexp(np.dstack(log_probs), axis=2) - np.log(ns+1)
            np.save(logits_pth, log_prob)
Example #16
    def post_solve(self, contact: b2Contact, impulse: b2ContactImpulse):
        super().post_solve(contact, impulse)
        data_a, data_b = get_data(contact)

        impulse = abs(sum(impulse.normalImpulses))
        if isinstance(data_b, BaseWall) or isinstance(data_a, BaseWall):
            if impulse > 60:
                self.on_wall_collide()
        elif (type(data_a) == Police and not data_a.is_broken) or \
                (type(data_b) == Police and not data_b.is_broken) or \
                (type(data_a) == Tire and type(data_a.car) == Police
                 and not data_a.car.is_broken) or \
                (type(data_b) == Tire and type(data_b.car) == Police
                 and not data_b.car.is_broken):
            if impulse > 20:
                self.on_police_collide()
Example #17
def main():
    particles = {}
    for i, line in enumerate(utils.get_data(20)):
        full = line.strip().split(' ')
        p = ThreeDPoint(
            *[int(x) for x in full[0].lstrip('p=<').rstrip('>,').split(',')])
        v = ThreeDPoint(
            *[int(x) for x in full[1].lstrip('v=<').rstrip('>,').split(',')])
        a = ThreeDPoint(
            *[int(x) for x in full[2].lstrip('a=<').rstrip('>,').split(',')])
        particles[i] = Particle(i, p, v, a)
    moved = move_particles(particles, 500)
    closest = min(moved.values(),
                  key=lambda x: abs(x.p.x) + abs(x.p.y) + abs(x.p.z))
    print(f'Part 1: {closest.pid}')
    remaining = remove_collided(particles, 500)
    print(f'Part 2: {len(remaining)}')
Example #18
def preprocess():
    # Data Preparation
    # ==================================================
    # Load data
    path_data = [i for i in FLAGS.path.split(',')]
    print("Loading data...")
    if FLAGS.class_weight == 0:
        x_train, y_train = utils.read_file_seg(path_data[0])
    else:
        x_train, y_train = utils.read_file_seg_sparse(path_data[0])
    # Build vocabulary
    max_document_length = max([len(x) for x in x_train])
    print(max_document_length)
    print('Loading a Word2vec model...')
    word_2vec = utils.load_word2vec(FLAGS.w2v)  # load the pretrained word vectors
    maxlen = FLAGS.sequence_length
    index_dict, word_vectors, x = utils.create_dictionaries(
        maxlen, word_2vec, x_train)
    print('Embedding weights...')
    vocab_size = FLAGS.embed_size
    word_size, embedding_weights = utils.get_data(index_dict,
                                                  word_vectors,
                                                  vocab_dim=vocab_size)
    # test set
    print('Test set ....')
    if FLAGS.class_weight == 0:
        x_test, ytest = utils.read_file_seg(path_data[1])
    else:
        x_test, ytest = utils.read_file_seg_sparse(path_data[1])
    index_dict1, word_vectors1, x_test = utils.create_dictionaries(
        maxlen, word_2vec, x_test)
    y_test = np.array(ytest)
    log.info("Vocabulary Size: {:d}".format(word_size))
    print('train_x_y_shape', x.shape, y_train.shape)
    print('test_x_y_shape', x_test.shape, y_test.shape)
    print("Vocabulary Size: {:d}".format(word_size))
    return x, y_train, x_test, y_test, embedding_weights
Example #19
def main():
    infected = set()
    height = 0
    width = 0
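    # Mark every '#' cell of the starting grid as infected.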
    for y, line in enumerate(utils.get_data(22)):
        if not line.strip():
            continue
        height += 1
        width = len(line.strip())
        for x, val in enumerate(line.strip()):
            if val == '#':
                infected.add(Point(x, y))
    mid = Point(width // 2, height // 2)
    print(f'Mid is {mid}')
    total_infected = 0
    burst_sim = burst(mid, infected.copy())
    for _ in range(10000):
        total_infected = next(burst_sim)
    print(f'Part 1: {total_infected}')

    burst_sim = advanced_burst(mid, infected)
    for _ in range(10000000):
        total_infected = next(burst_sim)
    print(f'Part 2: {total_infected}')
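The `burst` and `advanced_burst` generators referenced above are not part of this snippet; a minimal, hypothetical sketch of what `burst` is assumed to do (walk the grid, flip nodes, and yield a running count of infections caused):

def burst(start, infected):
    # Hypothetical sketch, not the original helper: the carrier turns right on an
    # infected node and left on a clean one, flips the node, steps forward, and
    # yields the running number of bursts that caused an infection.
    pos = start
    dx, dy = 0, -1  # facing up; y grows downward in the grid
    caused = 0
    while True:
        if pos in infected:
            dx, dy = -dy, dx   # turn right
            infected.remove(pos)
        else:
            dx, dy = dy, -dx   # turn left
            infected.add(pos)
            caused += 1
        pos = Point(pos.x + dx, pos.y + dy)
        yield caused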
Example #20
class LoginTest(BaseTest):
    def setUp(self):
        super().setUp()
        home_page = HomePage(self.driver)
        home_page.click_account_button()
        home_page.click_login_button()

    @data(*get_data(DATA_PATH))
    @unpack
    def test_invalid_credentials(self, email, password, error_message):
        if email == "VALID_EMAIL":
            email = Variables.VALID_EMAIL

        if password == "VALID_PASSWORD":
            password = Variables.VALID_PASSWORD

        login_page = LoginPage(self.driver)

        login_page.enter_email(email)
        login_page.enter_password(password)
        login_page.click_login_button()

        login_page.error_should_be_visible(error_message)

    def test_valid_credentials(self):
        login_page = LoginPage(self.driver)

        login_page.enter_email(Variables.VALID_EMAIL)
        login_page.enter_password(Variables.VALID_PASSWORD)
        login_page.click_login_button()

        basket_page = BasketPage(self.driver)

        basket_page.user_should_be_logged_in(Variables.VALID_EMAIL)

        basket_page.logout_user()
Example #21
    def PreSolve(self, contact, manifold):
        for body in get_data(contact):
            for i in self.connects_pre[body]:
                i(contact, manifold)
Example #22
    def EndContact(self, contact):
        for body in get_data(contact):
            for i in self.connects_end[body]:
                i(contact)
Example #23
    def BeginContact(self, contact):
        for body in get_data(contact):
            for i in self.connects_begin[body]:
                i(contact)
Example #24
    def __init__(self, npy_files, alpha=1):
        x, y = get_data(npy_files, np.arange(11), 10**8)
        self.query_char_ids = x
        self.log_counts = np.log(y)
        # exp(log_counts) ** alpha is simply y ** alpha
        self.sample_weights = np.exp(self.log_counts)**alpha
Example #25
from utils import utils
from typing import Dict, Tuple, List


# Single tab-delimited line of input (a sketch of tab_split follows this example)
config: List[int] = [utils.tab_split(x, int) for x in utils.get_data(6)][0]

# This will store each unique config, and which step
# it was encountered on
encountered_configs: Dict[Tuple, int] = {}
count = 0
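# Redistribute the largest bank until a previously seen configuration repeats.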
while tuple(config) not in encountered_configs:
    encountered_configs[tuple(config)] = count

    highest_value = max(config)
    highest_bank = config.index(highest_value)

    config[highest_bank] = 0
    for i in range(highest_value):
        config[(highest_bank + i + 1) % len(config)] += 1

    count += 1

print(f'Part 1: {count}')
print(f'Part 2: {count - encountered_configs[tuple(config)]}')
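`utils.tab_split` is project-specific and not shown above; a minimal sketch of the assumed behaviour (split a tab-separated line and cast every field):

def tab_split(line, cast=str):
    # Hypothetical helper: split a tab-delimited line and apply `cast` to each field.
    return [cast(field) for field in line.strip().split('\t')]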
Example #26
from utils.utils import set_torch_seed, set_gpu, get_tasks, get_data, get_model, get_backbone, get_strategy
from utils.utils import compress_args, get_args, torch_summarize
from utils.builder import ExperimentBuilder
from utils.bunch import bunch
import sys
import pprint

if __name__ == '__main__':

    args, excluded_args, parser = get_args()
    args = bunch.bunchify(args)

    set_torch_seed(args.seed)
    device = set_gpu(args.gpu)

    datasets = get_data(args)
    tasks = get_tasks(args)
    backbone = get_backbone(args, device)
    strategy = get_strategy(args, device)
    model = get_model(backbone, tasks, datasets, strategy, args, device)

    compressed_args = compress_args(bunch.unbunchify(args), parser)
    print(" ----------------- FULL ARGS (COMPACT) ----------------")
    pprint.pprint(compressed_args, indent=2)
    print(" ------------------------------------------------------")
    print(" ------------------ UNRECOGNISED ARGS -----------------")
    pprint.pprint(excluded_args, indent=2)
    print(" ------------------------------------------------------")

    system = ExperimentBuilder(model, tasks, datasets, device, args)
    system.load_pretrained()
Example #27
args = parser.parse_args()
with open(args.config) as f:
    config = yaml.load(f, Loader=yaml.SafeLoader)  # explicit Loader; bare yaml.load(f) is deprecated
config['config_file'] = args.config.replace('/','.').split('.')[-2]

seed = config['seed']
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)

n_epochs = config['optimization']['n_epochs']

if not args.disable_cuda and torch.cuda.is_available():
    device = torch.device('cuda:{}'.format(args.gpu))
else:
    device = torch.device('cpu')

logger = Logger(config)
model = get_model(config['model'])
optim = get_optimizer(model.parameters(),config['optimization'])
train_loader, valid_loader, test_loader = get_data(config['data'])

## Train
for i in range(n_epochs):
    for data, label in train_loader:
        break


Example #28
    def PostSolve(self, contact, impulse):
        for body in get_data(contact):
            for i in self.connects_post[body]:
                i(contact, impulse)
Example #29
    random.seed(args.seed)
    tf.set_random_seed(args.seed)
    np.set_printoptions(precision=3)
    np.set_printoptions(suppress=True)

    required_folders = ['log', 'summary', 'model']
    for folder in required_folders:
        if not os.path.exists(folder):
            os.makedirs(folder)

    # load data
    feat_idx = np.arange(11)
    args.n_feat = 8 * 8 + 2 * 16 + 1  # ip-> 8*8, port-> 2*16, protocol->1
    start_t = time.time()

    train_x, train_y = get_data(args.train, feat_idx, args.n_examples)
    print('train x shape:', train_x.shape, 'y max', np.max(train_y), 'y min',
          np.min(train_y))

    valid_x, valid_y = get_data(args.valid, feat_idx, args.n_examples)
    print('valid x shape:', valid_x.shape, 'y max', np.max(valid_y), 'y min',
          np.min(valid_y))

    test_x, test_y = get_data_list(args.test, feat_idx, args.n_examples)
    print('Load data time %.1f seconds' % (time.time() - start_t))
    if not args.evaluate:
        assert len(
            test_x) == 1, 'test on more than 1 minute (forgot --evaluate?)'

    data_stat = get_stat('train before log', train_x, train_y)
    train_y = np.log(train_y)
Example #30
from sklearn.ensemble import RandomForestClassifier
# from sklearn.model_selection import train_test_split
from utils import utils
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.model_selection import GridSearchCV
import sys


c_parser = utils.c_parser
prefix = c_parser.get('COMMON', 'prefix')
suffix = c_parser.get('CNN', 'suffix')
TEST_FP = c_parser.get('CNN', 'TEST_FP').format(prefix, suffix)

TRAIN_FP = c_parser.get('CNN', 'TRAIN_FP').format(prefix, suffix)

train_data, train_labels = utils.get_data(TRAIN_FP)
test_data, test_labels = utils.get_data(TEST_FP)

print('Training Features Shape:', train_data.shape)
print('Training Labels Shape:', train_labels.shape)
print('Validation Features Shape:', test_data.shape)
print('Validation Labels Shape:', test_labels.shape)

# Instantiate model with n decision trees
# entropy 1000 ~.60
# gini 1000 ~.60
rf = RandomForestClassifier(n_estimators=5000,
                            criterion='gini',
                            n_jobs=-1,
                            min_samples_leaf=75,
                            min_samples_split=5,