Example #1
def wait_and_create_network(host, port, timeout=wait_to_create_timeout):
    ttl = 0
    while ttl < timeout:
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((host, port))
            s.close()
            break
        except socket.error:
            pass
        sleep(0.25)
        ttl += 0.25
    if ttl >= timeout:
        raise Exception("Sorry, cannot connect to jsbridge extension, port %s" % port)

    back_channel, bridge = create_network(host, port)
    sleep(0.5)

    while back_channel.registered is False:
        back_channel.close()
        bridge.close()
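        # Reset asyncore's module-global socket map so the channels from the
        # failed registration attempt are fully discarded before reconnecting.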
        asyncore.socket_map = {}
        sleep(1)
        back_channel, bridge = create_network(host, port)

    return back_channel, bridge
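
A minimal usage sketch of the helper above; the host, port, and timeout values are placeholders, not taken from the original project:

back_channel, bridge = wait_and_create_network('127.0.0.1', 24242, timeout=30)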
Example #2
def wait_and_create_network(host, port, timeout=wait_to_create_timeout):
    ttl = 0
    while ttl < timeout:
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((host, port))
            s.close()
            break
        except socket.error:
            pass
        sleep(.25)
        ttl += .25
    if ttl >= timeout:
        raise Exception(
            "Sorry, cannot connect to jsbridge extension, port %s" % port)

    back_channel, bridge = create_network(host, port)
    sleep(.5)

    while back_channel.registered is False:
        back_channel.close()
        bridge.close()
        asyncore.socket_map = {}
        sleep(1)
        back_channel, bridge = create_network(host, port)

    return back_channel, bridge
Example #3
def SNc_experiment(frac, theta, weight=1.5, outdeg=0.6, title=''):
    nest.ResetKernel()
    nest.SetKernelStatus({
        'resolution': dt,  # set simulation resolution
        'print_time': True  # enable printing of simulation progress
    })
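    # `dt` (simulation resolution) and `simtime` (simulation length, used below)
    # are module-level constants defined elsewhere in this project.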

    cell_params = nodes.cell_params
    cell_params['MSN_D1']['params']['theta'] = theta
    cell_params['MSN_D2']['params']['theta'] = theta
    pop = network.create_populations(cell_params, scale=1)
    network.create_network(pop)
    network.connect_SNc(pop, frac=frac, weight=weight, outdeg=outdeg)

    #stim_times = [(1000,1010)]
    #network.add_stims(pop['SNc'], stim_times, amp = 500)

    spikes, voltages, thetameter = setup_recordings(pop)

    nest.Simulate(simtime)

    pl.figure()
    pl.title(title)
    rates = plot_raster(pop, spikes, simtime)

    pl.figure()
    pl.title(title)
    plot_theta(pop, thetameter)

    return rates
Example #4
def wait_and_create_network(host, port, timeout=wait_to_create_timeout):
    deadline = datetime.utcnow() + timedelta(seconds=timeout)
    connected = False

    while datetime.utcnow() < deadline:
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((host, port))
            s.close()
            connected = True
            break
        except socket.error:
            pass
        sleep(.25)
    if not connected:
        raise Exception("Cannot connect to jsbridge extension, port %s" % port)

    back_channel, bridge = create_network(host, port)
    sleep(.5)

    while back_channel.registered is False:
        back_channel.close()
        bridge.close()
        asyncore.socket_map = {}
        sleep(1)
        back_channel, bridge = create_network(host, port)

    return back_channel, bridge
Example #6
def worker(i, ckpt_freq, load_ckpt_file, render):
    """
    Set up a single worker.

    I'm still not 100% sure about how Distributed TensorFlow works, but as I
    understand it we do "between-graph replication": each worker has a separate
    graph, with the global set of parameters shared between all workers (pinned
    to worker 0).
    """
    dirname = 'summaries/%d_worker%d' % (int(time.time()), i)
    os.makedirs(dirname)
    summary_writer = tf.summary.FileWriter(dirname, flush_secs=1)

    tf.reset_default_graph()
    server = tf.train.Server(cluster, job_name="worker", task_index=i)
    sess = tf.Session(server.target)

    with tf.device("/job:worker/task:0"):
        create_network('global')
    with tf.device("/job:worker/task:%d" % i):
        w = Worker(sess, i, 'PongNoFrameskip-v4', summary_writer)
        if render:
            w.render = True

    if i == 0:
        saver = tf.train.Saver()
        checkpoint_file = os.path.join('checkpoints', 'network.ckpt')

    print("Waiting for cluster cluster connection...")
    sess.run(tf.global_variables_initializer())

    if load_ckpt_file is not None:
        print("Restoring from checkpoint '%s'..." % load_ckpt_file,
              end='',
              flush=True)
        saver.restore(sess, load_ckpt_file)
        print("done!")

    print("Cluster established!")
    step = 0
    while True:
        print("Step %d" % step)
        done = w.run_step()
        if done:
            w.reset_env()
        step += 1
        if (i == 0) and (step % ckpt_freq == 0):
            print("Saving checkpoint at step %d..." % step, end='', flush=True)
            saver.save(sess, checkpoint_file)
            print("done!")
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--resume',
        default='log/models/last.checkpoint',
        type=str,
        metavar='PATH',
        help='path to latest checkpoint (default: log/models/last.checkpoint)')
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)

    ds_val = create_test_dataset(512)

    attack_method = config.create_evaluation_attack_method(device)

    if os.path.isfile(args.resume):
        load_checkpoint(args.resume, net)

    print('Evaluating')
    clean_acc, adv_acc = eval_one_epoch(net, ds_val, device, attack_method)
    print('clean acc -- {}     adv acc -- {}'.format(clean_acc, adv_acc))
Example #8
    def create_network(self):
        self.input_image = self.iterator.get_next()

        # self.input_image = tf.image.resize_images(self.input_image, [28, 28])

        self.resulting_img, self.latent_space = network.create_network(
            self.input_image)
        self.l1_loss = losses_helper.reconstruction_loss_l1(
            self.resulting_img, self.input_image)
        # self.loss_kl_shared = losses_helper.KL_divergence_loss(z_mean, z_std)
        self.loss = tf.reduce_mean(self.l1_loss)

        self.opt = tf.train.AdamOptimizer(self.learning_rate,
                                          beta1=0.5,
                                          beta2=0.999)
        self.grads = self.opt.compute_gradients(self.loss)
        self.apply_gradient_op = self.opt.apply_gradients(
            self.grads, global_step=self.global_step)

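        # Maintain an exponential moving average of all trainable variables;
        # the averaged weights are commonly the ones used at evaluation time.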
        variable_averages = tf.train.ExponentialMovingAverage(
            0.9999, self.global_step)
        self.variables_averages_op = variable_averages.apply(
            tf.trainable_variables())
        self.train_op = tf.group(self.apply_gradient_op,
                                 self.variables_averages_op)

        self.create_tensorboard()
Example #9
def get_network(ckpt_dir):
    sess = tf.Session()
    network = create_network(scope='worker_0')
    ckpt_file = tf.train.latest_checkpoint(ckpt_dir)
    saver = tf.train.Saver()
    saver.restore(sess, ckpt_file)
    return sess, network
Example #10
def main():
    model = create_network().to(device)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    EvalAttack = config.create_evaluation_attack_method(device)

    now_train_time = 0
    for epoch in range(1, args.epochs + 1):
        # adjust learning rate for SGD
        adjust_learning_rate(optimizer, epoch)

        s_time = time()
        descrip_str = 'Training epoch: {}/{}'.format(epoch, args.epochs)
        # adversarial training
        train(args, model, device, train_loader, optimizer, epoch, descrip_str)
        now_train_time += time() - s_time

        acc, advacc = eval_one_epoch(model, test_loader, device, EvalAttack)

        # save checkpoint
        if epoch % args.save_freq == 0:
            torch.save(
                model.state_dict(),
                os.path.join(config.model_dir,
                             'model-wideres-epoch{}.pt'.format(epoch)))
Example #11
def process_single_epoch():
    print('**************')
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()

    DEVICE = torch.device('cuda:{}'.format(args.d))
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(DEVICE)

    nat_val = load_test_dataset(10000, natural=True)
    adv_val = load_test_dataset(10000, natural=False)

    AttackMethod = config.create_evaluation_attack_method(DEVICE)

    filename = '../ckpts/6leaf-epoch29.checkpoint'
    print(filename)
    if os.path.isfile(filename):
        load_checkpoint(filename, net)

    print('Evaluating Natural Samples')
    clean_acc, adv_acc = my_eval_one_epoch(net, nat_val, DEVICE, AttackMethod)
    print('clean acc -- {}     adv acc -- {}'.format(clean_acc, adv_acc))

    print('Evaluating Adversarial Samples')
    clean_acc, adv_acc = my_eval_one_epoch(net, adv_val, DEVICE, AttackMethod)
    print('clean acc -- {}     adv acc -- {}'.format(clean_acc, adv_acc))
Example #12
def generate():
    #load the notes used to train the model
    with open('data/notes', 'rb') as filepath:
        notes = pickle.load(filepath)

    # Get all pitch names
    pitchnames = sorted(set(item for item in notes))
    # Get the number of unique pitches (vocabulary size)
    n_vocab = len(set(notes))
    network_input = network.get_inputSequences(notes, pitchnames, n_vocab)
    normalized_input = np.array(network_input)
    normalized_input = np.reshape(normalized_input, (len(network_input), 100, 1))
    model = network.create_network(normalized_input, n_vocab)

    songs = glob('predict/*.mid')

    for file in songs:
        notes = []
        # converting .mid file to stream object
        print('parsing file ' + file)
        midi = converter.parse(file)
        notes_to_parse = []
        parts = None
        try:
            # Given a single stream, partition into a part for each unique instrument
            parts = instrument.partitionByInstrument(midi)
        except Exception:
            pass
        if parts:  # if the stream has instrument parts
            notes_to_parse = parts.parts[0].recurse()
        else:
            notes_to_parse = midi.flat.notes
        for element in notes_to_parse:
            mlen = element.duration.quarterLength
            if(mlen == 0):
                continue
            try:
                length,istrue = note.duration.quarterLengthToClosestType(mlen)
            except:
                continue
            #Standardize the notes to be 32/16/8/4/2/1
            lengthstr = str((1/note.duration.convertTypeToNumber(length))*4)
            if(float(lengthstr) > 4):
                lengthstr = str(4)
            elif(float(lengthstr) < 0.25):
                lengthstr = str(0.25)
            lengthstr = 'L'+lengthstr
            if isinstance(element, note.Note):
                # If it's a note, write the note
                notes.append(str(element.pitch) + lengthstr)
            elif isinstance(element, note.Rest):
                # If it's a rest, write it
                notes.append('R' + lengthstr)
            elif isinstance(element, chord.Chord):
                # If it's a chord, write it
                notes.append('.'.join(str(n) for n in element.normalOrder) + lengthstr)

        seq = midi2net(notes,n_vocab)
        prediction_output = generate_notes(model, network_input, pitchnames, n_vocab,seq)
        create_midi(prediction_output,file + '.predicted')
Example #13
def get_model_pred(model_file, dataset):
    # model creation
    model = create_network()
    model.load_params(model_file)
    model.initialize(dataset=dataset)
    # pred will have shape (num_clips, num_classes) and contain class probabilities
    pred = model.get_outputs(test_set)
    return pred
Example #15
def create_one_network(config, states, scope='dueling', reuse=False):
    out = network.create_network(config, states, scope=scope, reuse=reuse)

    value, advantage = tf.split(out, [1, config.get('num_actions')], axis=1)
    adv_mean = tf.reduce_mean(advantage, axis=1)

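    # Dueling aggregation: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), which
    # removes the unidentifiable constant offset between the value and
    # advantage streams.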
    q_val = value + advantage - tf.expand_dims(adv_mean, axis=1)

    return q_val
Example #16
def generate_music(file):
    notes = get_notes_from_file(file)
    vocab = load_vocabulary_from_training()
    vocab_size = len(vocab)

    network_input = prepare_sequence_for_prediction(notes, vocab)
    model = create_network(vocab_size, get_best_weights_filename())
    prediction_output = generate_notes(model, network_input, vocab, vocab_size)
    save_midi_file(prediction_output)
Example #17
    def inference(self):
        if self.__network:
            return self
        # Building network...
        with tf.variable_scope('ResNet'):
            net = create_network(self.x, self.h, self.keep_prob,
                                 self.numClasses)
        self.__network = net
        return self
Example #19
def wait_and_create_network(host, port, timeout=10):
    ttl = 0
    while ttl < timeout:
        sleep(.25)
        ttl += .25
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((host, port))
            s.close()
            break
        except socket.error:
            pass
    return create_network(host, port)
Example #20
    def inference(self):
        if self.__network:
            return self
        # Building network...
        with tf.variable_scope('ResNet'):
            net, net_flatten, vol_feat, cls_act_map = create_network(self.x, args.n_cls, self.is_train)
        self.__network = net
        self.__features = net_flatten
        self.__vol_features = vol_feat
        self.__cls_act_map = cls_act_map
        # cls_act_map: [64, 14]
        return self
Example #21
def train_network():
    epochs = 50
    notes = []
    with open('data/notes', 'rb') as filepath:
        notes = pickle.load(filepath)
    vocablen = len(set(notes))

    net_in, net_out = prepare_sequences(notes, vocablen)

    model = network.create_network(net_in, vocablen)

    train(model, net_in, net_out, epochs)
Example #22
def main():
    DEVICE = torch.device('cuda:{}'.format(args.d))
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(DEVICE)
    criterion = config.create_loss_function().to(DEVICE)

    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    TrainAttack = config.create_attack_method(DEVICE)
    EvalAttack = config.create_evaluation_attack_method(DEVICE)

    now_epoch = 0

    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    while True:
        if now_epoch > config.num_epochs:
            break
        now_epoch = now_epoch + 1

        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            now_epoch, config.num_epochs,
            lr_scheduler.get_lr()[0])
        train_one_epoch(net,
                        ds_train,
                        optimizer,
                        criterion,
                        DEVICE,
                        descrip_str,
                        TrainAttack,
                        adv_coef=args.adv_coef)
        if config.eval_interval > 0 and now_epoch % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, DEVICE, EvalAttack)

        lr_scheduler.step()

        save_checkpoint(now_epoch,
                        net,
                        optimizer,
                        lr_scheduler,
                        file_name=os.path.join(
                            config.model_dir,
                            'epoch-{}.checkpoint'.format(now_epoch)))
Example #23
def main():
    # get parameters from user
    args = sys.argv
    united_data_path = args[1]
    threshold_dir = args[2]

    multi_idx_df = load_df(united_data_path)
    calc_corr_dist_per_all_panels(multi_idx_df)

    pie_chart_neigbours_panel(multi_idx_df, is_pie=False, is_save=False)

    above_threshold_network = network.create_network(threshold_dir)
    graph = network.create_graph_from_network(above_threshold_network)
Example #24
def wait_and_create_network(host, port, timeout=10):
    ttl = 0
    while ttl < timeout:
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((host, port))
            s.close()
            break
        except socket.error:
            pass
        sleep(.25)
        ttl += .25
    
    back_channel, bridge = create_network(host, port)
    sleep(.5)
    
    while back_channel.registered is False:
        back_channel.close()
        bridge.close()
        asyncore.socket_map = {}
        sleep(1)
        back_channel, bridge = create_network(host, port)
    
    return back_channel, bridge
Example #26
def generate():
    with open('data/notes', 'rb') as filepath:
        notes = pickle.load(filepath)

    pitchnames = sorted(set(item for item in notes))
    n_vocab = len(set(notes))

    network_input = network.get_inputSequences(notes, pitchnames, n_vocab)
    normalized_input = np.array(network_input)
    normalized_input = np.reshape(normalized_input,
                                  (len(network_input), 100, 1))
    model = network.create_network(normalized_input, n_vocab)
    prediction_output = generate_notes(model, network_input, pitchnames,
                                       n_vocab)
    create_midi(prediction_output)
Example #27
def watch_doom_game(configuration):
    game, actions = init_watching_environment(configuration)
    load_file = load_file_simple(configuration)
    session = tf.compat.v1.Session()
    learn, get_q_values, get_best_action = create_network(
        session, len(actions))
    saver = tf.compat.v1.train.Saver()
    # Initialize variables before restoring; running the initializer after
    # restore would overwrite the weights loaded from the checkpoint.
    session.run(tf.compat.v1.global_variables_initializer())
    saver.restore(session, load_file)
    stacked_frames = deque([
        np.zeros((state_size[0], state_size[1]), dtype=int)
        for i in range(stack_size)
    ],
                           maxlen=4)
    run_episodes(game, get_best_action, actions, stacked_frames)
Example #28
def caller_process(model_file, qin, qout):
    caller_network = create_network(model_file)
    tc = 0
    with torch.no_grad():
        while True:
            item = qin.get()
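            # Queue protocol: "wait" markers are passed straight through to the
            # output queue, and None is the shutdown sentinel that ends the loop.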
            if item == "wait":
                qout.put("wait")
                continue
            if item is None:
                qout.put(None)
                break
            signal = item[1]
            tc += 1
            output = caller_network(torch.tensor(signal).cuda())
            qout.put((item[0], output, item[2]))
    print("caller done", tc)
Example #29
    def __init__(self, sess, worker_n, env_name, summary_writer):
        self.sess = sess
        self.env = EnvWrapper(gym.make(env_name), prepro2=prepro2, frameskip=4)

        worker_scope = "worker_%d" % worker_n
        self.network = create_network(worker_scope)
        self.summary_writer = summary_writer
        self.scope = worker_scope

        self.reward = tf.Variable(0.0)
        self.reward_summary = tf.summary.scalar('reward', self.reward)

        policy_optimizer = tf.train.AdamOptimizer(learning_rate=0.0005)
        value_optimizer = tf.train.AdamOptimizer(learning_rate=0.0005)

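        # Gradients are computed from this worker's local copy of the network
        # (update_scope) but applied to the shared parameters in the 'global'
        # scope, in the A3C style of asynchronous updates.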
        self.update_policy_gradients, self.apply_policy_gradients, self.zero_policy_gradients, self.grad_bufs_policy = \
            create_train_ops(self.network.policy_loss,
                             policy_optimizer,
                             update_scope=worker_scope,
                             apply_scope='global')

        self.update_value_gradients, self.apply_value_gradients, self.zero_value_gradients, self.grad_bufs_value = \
            create_train_ops(self.network.value_loss,
                             value_optimizer,
                             update_scope=worker_scope,
                             apply_scope='global')

        self.val_summ = tf.summary.scalar('value_loss',
                                          self.network.value_loss)

        self.init_copy_ops()

        self.frame_stack = deque(maxlen=N_FRAMES_STACKED)
        self.reset_env()

        self.t_max = 10000
        self.steps = 0
        self.episode_rewards = []
        self.render = False
        self.episode_n = 1

        self.value_log = deque(maxlen=100)
        self.fig = None
Example #30
def train_network():
    notes = get_notes_from_dataset()
    vocab = create_vocabulary_for_training(notes)
    vocab_size = len(vocab)

    training_sequence, validation_sequence = prepare_sequences_for_training(
        notes, vocab, vocab_size, BATCH_SIZE)

    latest_checkpoint = get_latest_checkpoint()

    if latest_checkpoint:
        print(
            f"*** Restoring from the lastest checkpoint: {latest_checkpoint} ***"
        )
        model = load_model(latest_checkpoint)
    else:
        model = create_network(vocab_size)

    train(model, training_sequence, validation_sequence)
Example #31
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)
    criterion = config.create_loss_function().to(device)

    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    train_attack = config.create_attack_method(device)
    eval_attack = config.create_evaluation_attack_method(device)

    now_epoch = 0

    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    for i in range(now_epoch, config.num_epochs):

        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(i, config.num_epochs,
                                                             lr_scheduler.get_last_lr()[0])
        train_one_epoch(net, ds_train, optimizer, criterion, device,
                        descrip_str, train_attack, adv_coef=args.adv_coef)
        if config.eval_interval > 0 and i % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, device, eval_attack)

        lr_scheduler.step()

    save_checkpoint(i, net, optimizer, lr_scheduler,
                    file_name=os.path.join(config.model_dir, 'epoch-{}.checkpoint'.format(i)))
Example #32
            'prob': max_label_prob[i],
            'bounded_box': bb
        })
    return result


def generate_random_color():
    return (np.random.randint(0, 255), np.random.randint(0, 255),
            np.random.randint(0, 255))


model = 'model/deploy.prototxt'
weights = 'model/bvlc_alexnet.caffemodel'
batch_size = network.BATCH_SIZE
map_id_word = read_label('model/label.txt')
net = network.create_network(model, weights)

if __name__ == '__main__':
    img = cv2.imread(sys.argv[1])
    img = reshape_image(img, frame_size=(1024, 1024, 3), mode='fit')

    print 'creating estimated possible bounded box'
    start_time = time.time()
    bounded_box_list = segment.get_bounded_box(img, 0.5, 500, 20)
    end_time = time.time()
    print end_time - start_time, 'time spent'

    bounded_box_list = filter_by_boundray_ratio(
        filter_by_size(bounded_box_list))
    print "predicting labels for bounded box"
Example #33
    SIMULATION_HORIZON = 1500  # in ticks
    INITIAL_STEPS = 100

    # the first argument is the algorithm: "random" "dijkstra" for now
    alg = sys.argv[1]
    if alg not in ['random', 'dijkstra', 'dijkstraBounded', 'lessCarAhead',
                   'dynamicRandom', 'decmcts', 'decmcts1Block', 'decmcts2Block',
                   'decmcts5Block']:
        print('Invalid Option!')
        sys.exit()

    # Fire up the model
    netlogo = fire_up(GRID_SIZE, False)

    # Create a networkx graph representing the NetLogo transportation network in Python
    network = create_network(GRID_SIZE)

    # create cars and assign random routes, and finish the setup
    cars = create_cars(NUM_CARS, GRID_SIZE, netlogo)

    # initialize some critical measurements (indicators of mobility)
    mean_travel_times = []
    average_mean_speed_so_far = []
    last_step_time = None

    # Run the procedure
    try:
        for i in range(SIMULATION_HORIZON):
            if i % 500 == 0 or 'decmcts' in alg:
                print(i)
Example #34
# ----------------------------------------------------------------------------
import os
from neon import logger as neon_logger
from neon.util.argparser import NeonArgparser
from neon.optimizers import RMSProp
from neon.transforms import Misclassification
from neon.callbacks.callbacks import Callbacks
from network import create_network
from data import make_train_loader, make_val_loader

eval_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'whale_eval.cfg')
config_files = [eval_config] if os.path.exists(eval_config) else []
parser = NeonArgparser(__doc__, default_config_files=config_files)
args = parser.parse_args()

model, cost_obj = create_network()

assert 'train' in args.manifest, "Missing train manifest"
assert 'val' in args.manifest, "Missing val manifest"

train = make_train_loader(args.manifest['train'], args.manifest_root, model.be,
                          noise_file=args.manifest.get('noise'))

neon_logger.display('Performing train and test in validation mode')
val = make_val_loader(args.manifest['val'], args.manifest_root, model.be)
metric = Misclassification()

model.fit(dataset=train,
          cost=cost_obj,
          optimizer=RMSProp(learning_rate=1e-4),
          num_epochs=args.epochs,
Example #35
train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.cfg')
config_files = [train_config] if os.path.exists(train_config) else []

parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--depth', type=int, default=2,
                    help='depth of each stage (network depth will be 9n+2)')
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()
random_seed = args.rng_seed if args.rng_seed else 0

# Check that the proper manifest sets have been supplied
assert 'train' in args.manifest, "Missing train manifest"
assert 'val' in args.manifest, "Missing validation manifest"

model, cost = create_network(args.depth)

# setup data provider
train = make_train_loader(args.manifest['train'], args.manifest_root, model.be, args.subset_pct,
                          random_seed)
test = make_validation_loader(args.manifest['val'], args.manifest_root, model.be, args.subset_pct)

# tune batch norm parameters on subset of train set with no augmentations
tune_set = make_tuning_loader(args.manifest['train'], args.manifest_root, model.be)

# configure callbacks
callbacks = Callbacks(model, eval_set=test, metric=Misclassification(), **args.callback_args)
callbacks.add_callback(BatchNormTuneCallback(tune_set), insert_pos=0)

# begin training
opt = GradientDescentMomentum(0.1, 0.9, wdecay=0.0001, schedule=Schedule([82, 124], 0.1))
Example #36
		Y_new = Y1
		X_new = int(round(float(X2*Y_new)/float(Y2)))

	img = cv2.resize(img, (X_new, Y_new))

	X_space_center = ((X1 - X_new)/2)
	Y_space_center = ((Y1 - Y_new)/2)

	image_frame[Y_space_center: Y_space_center+Y_new, X_space_center: X_space_center+X_new] = img
	return image_frame


feature_size = network.FEATURE_SIZE
batch_size = network.BATCH_SIZE

net = network.create_network(model, weights)

file_list = getFiles(sys.argv[1])
random.shuffle(file_list)

number_of_iteration =  get_num_batch(len(file_list), batch_size)


batch_size_cluster = 10000

if len(sys.argv) != 4:
	images_feature = None
	epoch = 20
	k = 90
	clr = MiniBatchKMeans(n_clusters=k, init='k-means++', batch_size=batch_size_cluster, compute_labels=True, max_no_improvement=None, n_init=10)
	for e in xrange(epoch):
Example #37
def run_net(config, word_index, x_train, y_train, x_val, y_val, x_test,
            y_test):
    model = network.create_network(config, word_index)
    res = evaluate_model(config, model, x_train, y_train, x_val, y_val, x_test,
                         y_test)
    print(str(res) + "\n")
Example #38
be = gen_backend(**extract_valid_args(args, gen_backend))
be.bsz = 32

# setup data provider
shape = dict(channel_count=3, height=112, width=112, scale_min=128, scale_max=128)

testParams = VideoParams(frame_params=ImageParams(center=True, flip=False, **shape),
                         frames_per_clip=16)

common = dict(target_size=1, nclasses=101, datum_dtype=np.uint8)

videos = DataLoader(set_name='val', repo_dir=args.data_dir, media_params=testParams,
                    shuffle=False, **common)

# initialize model
model = create_network()
model.load_params(args.model_weights)
model.initialize(dataset=videos)

# read label index file into dictionary
label_index = {}
with open(args.class_ind_file) as label_index_file:
    for line in label_index_file:
        index, label = line.split()
        label_index[int(index) - 1] = label


def print_label_on_image(frame, top_labels):
    labels = [(label_index[index], "{0:.2f}".format(prob)) for (index, prob) in top_labels]

    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
Example #39
import torch
import json
import numpy as np
from tensorboardX import SummaryWriter

import torch.nn as nn
import torch.optim as optim
import os
import time

DEVICE = torch.device('cuda:{}'.format(args.d))
torch.backends.cudnn.benchmark = True

writer = SummaryWriter(log_dir=config.log_dir)

net = create_network()
net.to(DEVICE)
criterion = CrossEntropyWithWeightPenlty(net.other_layers, DEVICE,
                                         config.weight_decay)  #.to(DEVICE)
optimizer = config.create_optimizer(net.other_layers.parameters())
lr_scheduler = config.create_lr_scheduler(optimizer)

Hamiltonian_func = Hamiltonian(net.layer_one, config.weight_decay)
layer_one_optimizer = optim.SGD(net.layer_one.parameters(),
                                lr=lr_scheduler.get_lr()[0],
                                momentum=0.9,
                                weight_decay=5e-4)
layer_one_optimizer_lr_scheduler = optim.lr_scheduler.MultiStepLR(
    layer_one_optimizer, milestones=[15, 19], gamma=0.1)
LayerOneTrainer = FastGradientLayerOneTrainer(Hamiltonian_func,
                                              layer_one_optimizer,
Example #40
from neon.util.argparser import NeonArgparser

from data import make_train_loader, make_test_loader
from network import create_network

# parse the command line arguments
train_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'train.cfg')
config_files = [train_config] if os.path.exists(train_config) else []

parser = NeonArgparser(__doc__, default_config_files=config_files)
parser.add_argument('--subset_pct', type=float, default=100,
                    help='subset of training dataset to use (percentage)')
args = parser.parse_args()

random_seed = 0 if args.rng_seed is None else args.rng_seed
model, cost = create_network()

# setup data provider
assert 'train' in args.manifest, "Missing train manifest"
assert 'test' in args.manifest, "Missing test manifest"

train = make_train_loader(args.manifest['train'], args.manifest_root, model.be, args.subset_pct,
                          random_seed)
valid = make_test_loader(args.manifest['test'], args.manifest_root, model.be, args.subset_pct)

# setup callbacks
callbacks = Callbacks(model, eval_set=valid, **args.callback_args)

# gradient descent with momentum, weight decay, and learning rate decay schedule
learning_rate_sched = Schedule(list(range(6, args.epochs, 6)), 0.1)
opt_gdm = GradientDescentMomentum(0.003, 0.9, wdecay=0.005, schedule=learning_rate_sched)