Example #1
def main():
    args = get_args()

    data_dir = "../data/"
    ## data preparation
    _, valid_loader = data.load_data(data_dir=data_dir,
                                     input_size=224,
                                     batch_size=args.batch_size,
                                     augmentation=args.augmentation)
    print('Computing t-SNE embedding')
    tsne = TSNE(n_components=2)
    t0 = time()
    pretrained_model = Network(20).to(args.device)
    pretrained_model.load_state_dict(torch.load('tsne.pt'))
    outputs = []
    label_list = []
    for inputs, labels in valid_loader:
        inputs = inputs.to(args.device)
        output = forward(pretrained_model, inputs)
        outputs.append(output.cpu().detach().numpy().astype(np.float64))
        label_list.append(labels)
    output = np.concatenate(outputs, axis=0)
    labels = np.concatenate(label_list, axis=0)
    result = tsne.fit_transform(output)

    plot_embedding(
        result, labels,
        't-SNE embedding of the 20 classes (time %.2fs)' % (time() - t0))
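
# A minimal sketch of the plot_embedding helper called above (the real helper is
# not shown in this example, so treat the matplotlib version below as an
# assumption): it scatters the 2-D t-SNE result coloured by class label.
def plot_embedding_sketch(result, labels, title):
    import matplotlib.pyplot as plt
    plt.figure(figsize=(8, 8))
    plt.scatter(result[:, 0], result[:, 1], c=labels, cmap='tab20', s=5)
    plt.title(title)
    plt.show()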
Example #2
    def post(self):
        if 'address' in request.form:
            address = request.form['address']
            prefixlen = request.form.get('prefixlen', type=int,
                                         default=get_max_prefixlen(address))
            try:
                qry = Network.overlaps_with(address, prefixlen)
            except ValueError as e:
                return jsonify({'error': str(e)}), 400

            if qry.count() > 0:
                conflicts = ','.join(map(lambda n: n.cidr, qry.all()))
                msg = 'ip address conflict: {}'.format(conflicts)
                return jsonify({'error': msg}), 400

        else:
            prefixlen = request.form.get('prefixlen', type=int, default=32)
            address = Network.next_unused_network(prefixlen)

        try:
            network = Network(g.user, address, prefixlen)
            db.session.add(network)
            db.session.commit()
        except AssertionError as e:
            return jsonify({'error': str(e)}), 400

        return jsonify(message='success', network=network.as_dict())
Example #3
    def __init__(self, env, args):
        self.env = env
        self.args = args

        # define the network
        self.net = Network(self.env.observation_space.shape[0],
                           self.env.action_space.shape[0])
        self.old_net = Network(self.env.observation_space.shape[0],
                               self.env.action_space.shape[0])

        # make sure the net and old net have the same parameters
        self.old_net.load_state_dict(self.net.state_dict())

        # define the optimizer
        self.optimizer = torch.optim.Adam(self.net.critic.parameters(),
                                          lr=self.args.lr)

        # define the running mean filter
        self.running_state = ZFilter((self.env.observation_space.shape[0], ),
                                     clip=5)

        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)
        self.model_path = self.args.save_dir + self.args.env_name
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)

        self.start_episode = 0
Example #4
def batch_update_weights(optimizer: optim.Optimizer, network: Network, batch):
    optimizer.zero_grad()

    value_loss = 0
    reward_loss = 0
    policy_loss = 0

    # Format training data
    image_batch = np.array([item[0] for item in batch])
    action_batches = np.array([item[1] for item in batch])
    target_batches = np.array([item[2] for item in batch])
    action_batches = np.swapaxes(action_batches, 0, 1)
    target_batches = target_batches.transpose(1, 2, 0)

    # Run initial inference
    values, rewards, policy_logits, hidden_states = network.batch_initial_inference(
        image_batch)
    predictions = [(1, values, rewards, policy_logits)]

    # Run recurrent inferences
    for action_batch in action_batches:
        values, rewards, policy_logits, hidden_states = network.batch_recurrent_inference(
            hidden_states, action_batch)
        predictions.append(
            (1.0 / len(action_batches), values, rewards, policy_logits))

        hidden_states = scale_gradient(hidden_states, 0.5)

    # Calculate losses
    for target_batch, prediction_batch in zip(target_batches, predictions):
        gradient_scale, values, rewards, policy_logits = prediction_batch
        target_values, target_rewards, target_policies = \
            (torch.tensor(list(item), dtype=torch.float32, device=values.device.type) \
            for item in target_batch)

        gradient_scale = torch.tensor(gradient_scale,
                                      dtype=torch.float32,
                                      device=values.device.type)
        value_loss += gradient_scale * scalar_loss(values, target_values)
        reward_loss += gradient_scale * scalar_loss(rewards, target_rewards)
        policy_loss += gradient_scale * cross_entropy_with_logits(
            policy_logits, target_policies, dim=1)

    value_loss = value_loss.mean() / len(batch)
    reward_loss = reward_loss.mean() / len(batch)
    policy_loss = policy_loss.mean() / len(batch)

    total_loss = value_loss + reward_loss + policy_loss
    logging.info('Training step {} losses'.format(network.training_steps()) + \
        ' | Total: {:.5f}'.format(total_loss) + \
        ' | Value: {:.5f}'.format(value_loss) + \
        ' | Reward: {:.5f}'.format(reward_loss) + \
        ' | Policy: {:.5f}'.format(policy_loss))

    # Update weights
    total_loss.backward()
    optimizer.step()
    network.increment_step()

    return total_loss, value_loss, reward_loss, policy_loss
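
# The scale_gradient helper used above is not shown in this example. A common
# definition (following the MuZero pseudocode) keeps the forward value unchanged
# while shrinking the gradient that flows back; treat this as an assumption
# about this repository's helper:
def scale_gradient_sketch(tensor, scale):
    # identity in the forward pass, gradient multiplied by `scale` in the backward pass
    return tensor * scale + tensor.detach() * (1 - scale)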
Example #5
    def __init__(self, gamma, epsilon, lr, n_actions, input_dims,
                 mem_size, batch_size, eps_min=0.01, eps_dec=5e-7,
                 replace=1000, chkpt_dir='tmp/dueling_ddqn'):
        self.gamma = gamma
        self.epsilon = epsilon
        self.lr = lr
        self.n_actions = n_actions
        self.input_dims = input_dims
        self.batch_size = batch_size
        self.eps_min = eps_min
        self.eps_dec = eps_dec
        self.replace_target_cnt = replace
        self.chkpt_dir = chkpt_dir
        self.action_space = [i for i in range(self.n_actions)]
        self.learn_step_counter = 0

        self.memory = ReplayBuffer(mem_size, input_dims, n_actions)

        self.q_eval = Network(self.lr, self.n_actions,
                              input_dims=self.input_dims,
                              name='lunar_lander_dueling_ddqn_q_eval',
                              chkpt_dir=self.chkpt_dir)

        self.q_next = Network(self.lr, self.n_actions,
                              input_dims=self.input_dims,
                              name='lunar_lander_dueling_ddqn_q_next',
                              chkpt_dir=self.chkpt_dir)
Example #6
    def get(self, address, prefixlen):
        if address is None or prefixlen is None:
            no_networks = request.args.get('no_networks', type=bool, default=False)
            networks = Network.get_all(no_networks=no_networks)
            return jsonify(networks=[n.as_dict() for n in networks])

        network = Network.get(address).first_or_404()
        return jsonify(network=network.as_dict(compact=False))
Example #7
def update_weights(optimizer: optim.Optimizer, network: Network, batch):
    optimizer.zero_grad()

    value_loss = 0
    reward_loss = 0
    policy_loss = 0
    for image, actions, targets in batch:
        # Initial step, from the real observation.
        value, reward, policy_logits, hidden_state = network.initial_inference(
            image)
        predictions = [(1.0 / len(batch), value, reward, policy_logits)]

        # Recurrent steps, from action and previous hidden state.
        for action in actions:
            value, reward, policy_logits, hidden_state = network.recurrent_inference(
                hidden_state, action)
            # TODO: Try not scaling this for efficiency
            # Scale so that the recurrent inference updates together carry the same
            # weight as the single initial inference update
            predictions.append(
                (1.0 / len(actions), value, reward, policy_logits))

            hidden_state = scale_gradient(hidden_state, 0.5)

        for prediction, target in zip(predictions, targets):
            gradient_scale, value, reward, policy_logits = prediction
            target_value, target_reward, target_policy = \
                (torch.tensor(item, dtype=torch.float32, device=value.device.type) \
                for item in target)

            # Past end of the episode
            if len(target_policy) == 0:
                break

            value_loss += gradient_scale * scalar_loss(value, target_value)
            reward_loss += gradient_scale * scalar_loss(reward, target_reward)
            policy_loss += gradient_scale * cross_entropy_with_logits(
                policy_logits, target_policy)

            # print('val -------', value, target_value, scalar_loss(value, target_value))
            # print('rew -------', reward, target_reward, scalar_loss(reward, target_reward))
            # print('pol -------', policy_logits, target_policy, cross_entropy_with_logits(policy_logits, target_policy))

    value_loss /= len(batch)
    reward_loss /= len(batch)
    policy_loss /= len(batch)

    total_loss = value_loss + reward_loss + policy_loss
    scaled_loss = scale_gradient(total_loss, gradient_scale)

    logging.info('Training step {} losses'.format(network.training_steps()) + \
        ' | Total: {:.5f}'.format(total_loss) + \
        ' | Value: {:.5f}'.format(value_loss) + \
        ' | Reward: {:.5f}'.format(reward_loss) + \
        ' | Policy: {:.5f}'.format(policy_loss))

    scaled_loss.backward()
    optimizer.step()
    network.increment_step()
Example #8
def create_hostonly(user, host, ip, netmask, lower_ip, upper_ip):
    data_dict = dict(request_type="network", request_id=random_str(), request_userid=user.id,
                     operation_type=CREATE_HOSTONLY, ip=ip, netmask=netmask, lower_ip=lower_ip, upper_ip=upper_ip)

    response_dict = communicate(data_dict, host.ip, host.vm_manager_port)
    net_name = response_dict["net_name"]
    network = Network(name=net_name, type=INTNET, host=host, ip=ip, netmask=netmask, lower_ip=lower_ip,
                      upper_ip=upper_ip, machines=json.dumps([]))
    network.save()
Example #9
 def test_network_list(self):
     """Get networks list"""
     for i in range(10):
         network = Network(name='Network %i' % i,
                           description='Description %i' % i,
                           user=self.user)
         network.save()
     response = self.client.get(reverse('network_list'))
     self.assertEqual(response.status_code, 200)
Example #10
def main():
    # random seed
    seed = 1234
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # load dataset
    if args.dataset[0] == 'deepfashion':
        ds = pd.read_csv('./Anno/df_info.csv')
        from dataset import DeepFashionDataset as DataManager
    elif args.dataset[0] == 'fld':
        ds = pd.read_csv('./Anno/fld_info.csv')
        from dataset import FLDDataset as DataManager
    else:
        raise ValueError

    print('dataset : %s' % (args.dataset[0]))
    if not args.evaluate:
        train_dm = DataManager(ds[ds['evaluation_status'] == 'train'],
                               root=args.root)
        train_dl = DataLoader(train_dm,
                              batch_size=args.batchsize,
                              shuffle=True)

        if not os.path.exists('models'):
            os.makedirs('models')

    test_dm = DataManager(ds[ds['evaluation_status'] == 'test'],
                          root=args.root)
    test_dl = DataLoader(test_dm, batch_size=args.batchsize, shuffle=False)

    # Load model
    print("Load the model...")
    net = Network(dataset=args.dataset, flag=args.glem).cuda()
    if args.weight_file is not None:
        weights = torch.load(args.weight_file)
        if args.update_weight:
            weights = utils.load_weight(net, weights)
        net.load_state_dict(weights)

    # evaluate only
    if args.evaluate:
        print("Evaluation only")
        test(net, test_dl, 0)
        return

    # learning parameters
    optimizer = torch.optim.Adam(net.parameters(), lr=args.learning_rate)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 5, 0.1)

    print('Start training')
    for epoch in range(args.epoch):
        lr_scheduler.step()
        train(net, optimizer, train_dl, epoch)
        test(net, test_dl, epoch)
Example #11
 def test_network_update(self):
     """Update existing network"""
     network = Network(name='Network', description='Description', user=self.user)
     network.save()
     network_data = {
         'name': 'New name',
         'description': 'New description'
     }
     response = self.client.post(reverse('network_update', args=[network.pk]), network_data)
     self.assertEqual(response.status_code, 302)
Example #12
    def __init__(self, state_size, action_size, seed):

        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        self.Q = Network(self.state_size, self.action_size, self.seed)
        self.Q_dash = Network(self.state_size, self.action_size, self.seed)

        self.optimizer = optim.Adam(self.Q.parameters(), lr=LR)

        self.replay = ReplayBuffer(self.seed)
        self.t_step = 0
Example #13
def initialize_database(app, db):
    """Drop and restore database in a consistent state"""
    with app.app_context():
        db.drop_all()
        db.create_all()
        first_network = Network(name='First Network', site='DEL18DT')
        first_network.sensors.extend([Sensor(name='Bulkhead 5 Water Level', value=50),
                                      Sensor(name='Bulkhead 7 Water Level', value=20),
                                      Sensor(name='Bulkhead 2 Water Level', value=40)])
        second_network = Network(name='Second Network', site='DEL23DT')
        second_network.sensors.extend([Sensor(name='Rain Sensor Front Level', value=250),
                                       Sensor(name='Rain Sensor Back Level', value=620)])
        db.session.add(first_network)
        db.session.add(second_network)
        db.session.commit()
Example #14
def muzero(config: MuZeroConfig):
    # Create core objects
    shared_storage = SharedStorage.remote()
    # TODO: Decide whether to use CPU or GPU for actor networks
    initial_network = Network(config.obs_shape,
                              config.action_space_size,
                              device='cpu')
    shared_storage.save_network.remote(0, initial_network)
    replay_buffer = ReplayBuffer.remote(config)
    writer = TensorboardLogger.remote()

    # Spin up actor processes
    sim_processes = []
    for i in range(config.num_actors):
        logging.debug('Launching actor #{}'.format(i + 1))
        proc = launch_actor_process.remote(config, shared_storage,
                                           replay_buffer, i, writer)
        sim_processes.append(proc)

    launch_trainer_process.remote(config, shared_storage, replay_buffer,
                                  writer)

    # Update buffer size
    while True:
        buffer_size = ray.get(replay_buffer.get_buffer_size.remote())
        logging.debug('Buffer size: {}'.format(buffer_size))
        time.sleep(20)
Example #15
def network_list(request, page=None):
    search_phrase = request.GET.get('s')
    if search_phrase and search is not None:
        nets = search(Network, search_phrase)
        # TODO
        # filter search results by user access
    else:
        nets = Network.shared_objects(request.user)
        
    paginator = Paginator(list(nets), 10)
    
    page = page or request.GET.get('page', 1)
    try:
        nets = paginator.page(page)
    except PageNotAnInteger:
        nets = paginator.page(1)
    except EmptyPage:
        nets = paginator.page(paginator.num_pages)
    
    extra_context = {
        'networks': nets,
        'url': '/network/network/list/'
    }
    return direct_to_template(request, 'networks/network_list.html',
                              extra_context=extra_context)
Example #16
def members(member=None):
    # load network from database
    network = Network.from_dict(
        db.collection('data').document('network').get().to_dict())

    # check to make sure members were successfully loaded
    if network is None:
        return Response(status=503)

    # get member/members from network
    response = network.get_members(member)

    # check that network contained a match
    if response is None:
        return Response(status=404)

    # if specific member was requested, return full member object
    if isinstance(response, Member):
        return jsonify(response.to_dict(abbreviated=False)), 200

    # abbreviate member info (less info)
    member_dict = []
    for member in response.values():
        member_dict.append(member.to_dict(abbreviated=True))

    # return all users
    return jsonify(member_dict), 200
Example #17
    def post(self):
        """
        Create a new network
        """

        if "Administrator" != current_user.role:
            return make_response(jsonify({"msg": "Forbidden"}), 403)

        id = request.json.get("id")
        name = request.json.get("name")

        network = Network.query.filter_by(name=name).first()

        if not network:  # If no network exists with that name, then create a new one
            network = Network(id=id, name=name)
            db.session.add(network)
            db.session.commit()
            ret = {'msg': 'Success'}
            return make_response(jsonify(ret), 200)

        else:
            return make_response(
                jsonify({
                    "msg":
                    "Network with that name already exists, please try again with a new name."
                }), 400)
Example #18
    def delete(self, address, prefixlen):
        network = Network.get(address, prefixlen).first_or_404()
        if network.owner != g.user:
            abort(401)

        db.session.delete(network)
        db.session.commit()
        return jsonify(message='success')
Example #19
def network(*layers: List[Union[Layer, Iterable[Layer]]]) -> Network:
    actual = []
    for layer in layers:
        if isinstance(layer, Layer.cls):
            actual.append(layer)
        else:
            actual.extend(layer)
    return Network(*actual)
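
# Usage sketch (the Dense/ReLU layer classes are hypothetical, shown only to
# illustrate the flattening behaviour): nested iterables of layers are expanded
# before being handed to Network, so
#     network(Dense(128), [Dense(64), ReLU()], Dense(10))
# builds the same model as
#     Network(Dense(128), Dense(64), ReLU(), Dense(10))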
Example #20
def main():
    # random seed
    seed = 1234
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # load dataset
    if args.dataset[0] == 'deepfashion':
        ds = pd.read_csv('./Anno/df_info.csv')
        from dataset import DeepFashionDataset as DataManager
    elif args.dataset[0] == 'fld':
        ds = pd.read_csv('./Anno/fld_info.csv')
        from dataset import FLDDataset as DataManager
    else:
        raise ValueError

    print('dataset : %s' % (args.dataset[0]))
    if not args.evaluate:
        train_dm = DataManager(ds[ds['evaluation_status'] == 'train'], root=args.root)
        train_dl = DataLoader(train_dm, batch_size=args.batchsize, shuffle=True)

        if not os.path.exists('models'):
            os.makedirs('models')

    test_dm = DataManager(ds[ds['evaluation_status'] == 'test'], root=args.root)
    test_dl = DataLoader(test_dm, batch_size=args.batchsize, shuffle=False)

    # Load model
    print("Load the model...")
    net_cca = torch.nn.DataParallel(Network(dataset=args.dataset, flag=1)).cuda()
    
    net_fpn = torch.nn.DataParallel(Network(dataset=args.dataset, flag=0)).cuda()
    
    weights = torch.load(weight_cca)
    net_cca.load_state_dict(weights)
    
    weights = torch.load(weight_fpn)
    net_fpn.load_state_dict(weights)

    #print('net:\n' + str(net.module))#TEST

    print("Prediction only")
    predict(net_cca, net_fpn, test_dl, 0)
Example #21
def generate_network(data):
    # generate list of member objects
    members = dict()
    for name, value in data.items():
        members[name] = Member(value, raw=True)

    # generate network
    network = Network(members)

    return network
Example #22
    def put(self, address, prefixlen):
        network = Network.get(address, prefixlen).first_or_404()

        if 'address' in request.form:
            network.network_address = request.form['address']

        if 'prefixlen' in request.form:
            network.prefixlen = int(request.form['prefixlen'])

        db.session.commit()

        return jsonify(message='success')
Example #23
def main_unsupervised_new(used_labels=None):
    trainset = MNIST('train', used_labels)
    validset = MNIST('valid', used_labels)
    net = Network(trainset.n_classes, feature_size=128)
    params = net.parameters()
    criterion = LossUnsupervisedNew()
    optimizer = optim.SGD
    lr_scheduler = MultiStepLR

    trainer = SupervisedTrainer(configer,
                                net,
                                params,
                                trainset,
                                validset,
                                criterion,
                                optimizer,
                                lr_scheduler,
                                num_to_keep=5,
                                resume=False,
                                valid_freq=1,
                                show_embedding=True)
    trainer.train()
    del trainer
Example #24
def get_context(file_path: str) -> Context:
    with open(file_path, 'r') as file:
        context_dict = json.load(file)

        context = Context(**context_dict)

        for i in range(len(context.containers)):
            container = context.containers[i] = Container(**context.containers[i])
            container.ports = Port(**container.ports)

        for i in range(len(context.networks)):
            context.networks[i] = Network(**context.networks[i])

        return context
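
# The JSON file parsed above is expected to have roughly this shape (the nested
# field names are illustrative guesses, not taken from the original code; only
# the "containers", "ports" and "networks" keys are implied by the attribute
# access above):
#
#   {
#     "containers": [{"name": "web", "ports": {"internal": 80, "external": 8080}}],
#     "networks":   [{"name": "backend"}]
#   }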
Example #25
 def setUp(self):
     self.client = Client()
     
     self.user = User.objects.create_user('user', '*****@*****.**', 'userpassword')
     self.client.login(username='******', password='******')
     
     self.other_user = User.objects.create_user('other', '*****@*****.**', 'otherpassword')
     
     self.host = Host(name="Host", description="Description",
                      ipv4='1.2.3.4', ipv6='2002:0:0:0:0:0:c22:384e',
                      user=self.user)
     self.host.save()
     
     self.net = Network(name="Net", description="Description", user=self.user)
     self.net.save()
Example #26
def new_network():
    '''
    Create new network
    '''
    form = NetworkForm()
    form.servers.choices = [(s.name, s.name + " - " + s.description)
                            for s in Server.query.order_by('name')]

    if form.validate_on_submit():
        my_network = Network()
        form.populate_obj(my_network)
        my_network.servers = ",".join(my_network.servers)

        db.session.add(my_network)
        try:
            db.session.commit()
            # User info
            flash('Network created correctly', 'success')
            return redirect(url_for('networks'))
        except Exception:
            db.session.rollback()
            flash('Error generating network.', 'danger')

    return render_template('web/new_network.html', form=form)
Example #27
def network_events(request, object_id):
    """Display events related to a network
    """
    network = Network.objects.get(pk=object_id)

    if not network.has_access(request.user):
        raise Http404()

    queryset = Network.shared_objects(request.user)
    related_hosts = [nh.host.pk for nh in NetworkHost.objects.filter(network=network)]
    events = Event.objects.filter(source_host__pk__in=related_hosts)
    extra_context = {
        'events': events,
        'can_edit': network.can_edit(request.user)
    }
    return object_detail(request, queryset, object_id,
                         extra_context=extra_context,
                         template_name='networks/network_events.html')
Example #28
    def allocate_subnet(self, additional_mask_bits, name):
        from .rest_controller import RestController
        rest = RestController()
        import ipaddress as ip
        net = rest.get_instance(resource='network',
                                resource_id=self.network_id)
        if net is None:
            pass
        else:
            network = Network(**net)
            used_sbns = list(
                map(lambda x: ip.IPv4Network(x.cidr), network.subnets))
            n = ip.IPv4Network(network.cidr)
            psns = list(n.subnets(int(additional_mask_bits)))

            for sbn in used_sbns:
                psns = list(filter(lambda x: not sbn.overlaps(x), psns))

            subnet_cidr = str(psns[0].compressed)

            return subnet_cidr
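
# Stdlib-only sketch of the subnet-selection logic above (the literal CIDRs are
# illustrative and not from the original code):
#
#   >>> import ipaddress as ip
#   >>> n = ip.IPv4Network('10.0.0.0/16')
#   >>> used = [ip.IPv4Network('10.0.0.0/24')]
#   >>> candidates = list(n.subnets(8))     # every /24 inside the /16
#   >>> free = [c for c in candidates if not any(u.overlaps(c) for u in used)]
#   >>> free[0].compressed
#   '10.0.1.0/24'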
Example #29
def members_filter():
    # return error if no tag was provided
    if not all(arg in request.args for arg in ('field', 'value')):
        return jsonify(
            'Incorrect parameters received: %s, need \'field\' and \'value\'' %
            request.args), 400

    # get tag
    field = request.args['field']
    value = request.args['value']

    # get network from firestore
    network = Network.from_dict(
        db.collection('data').document('network').get().to_dict())

    # get members matching tag
    members = network.get_members_of('(%s, %s)' % (field, value))

    # create serializable response
    matches = []
    for member in members:
        matches.append(member.to_dict(abbreviated=True))

    return jsonify(matches), 200
Example #30
def play_game(config: MuZeroConfig, network: Network) -> Game:
    game = Game.from_config(config)

    while not game.terminal() and len(game.history) < config.max_moves:
        # At the root of the search tree we use the representation function to
        # obtain a hidden state given the current observation.
        root = Node(0)
        last_observation = game.make_image(-1)
        root.expand(game.to_play(), game.legal_actions(),
                    network.initial_inference(last_observation).numpy())
        root.add_exploration_noise(config)

        # logging.debug('Running MCTS on step {}.'.format(len(game.history)))
        # We then run a Monte Carlo Tree Search using only action sequences and the
        # model learned by the network.
        run_mcts(config, root, game.action_history(), network)
        action = root.select_action(config, len(game.history), network)
        game.apply(action)
        game.store_search_statistics(root)

    logging.info('Finished episode at step {} | cumulative reward: {}' \
        .format(len(game.obs_history), sum(game.rewards)))

    return game
Example #31
 def test_network_detail(self):
     """Get network details"""
     network = Network(name='Network', description='Description', user=self.user)
     network.save()
     response = self.client.get(reverse('network_detail', args=[network.pk]))
     self.assertEqual(response.status_code, 200)
Example #32
            + '-net=' + args.model                                                   \
            + '-lr=*'                                                                \
            + '-examples_per_class=' + str(args.examples_per_class)                  \
            + '-num_classes=*'                                                       \
            + '-epc_seed=*'                                                          \
            + '-train_seed=*'                                                        \
            + '-epoch=*'                                                             \
            + '.pth'

        model_url = fnmatch.filter(model_urls, pattern)[0]
        model_weights_path, results_path = download_model(model_url, ckpt_dir)
        df = pd.read_csv(results_path)
        row = df.iloc[0]

    if args.model in ['VGG11_bn', 'ResNet18', 'DenseNet3_40', 'MobileNet', 'LeNet']:
        model = Network().construct(args.model, row)
    else:
        raise Exception('Unknown model argument: {}'.format(args.model))

    state_dict = torch.load(model_weights_path, map_location=lambda storage, loc: storage)
    if args.new:
        state_dict = state_dict['model']
    model.load_state_dict(state_dict, strict=True)
    model = model.to(device)

    model = model.eval()

    mean, std = get_mean_std(args.dataset)
    pad = int((row.padded_im_size-row.im_size)/2)
    transform = transforms.Compose([transforms.Pad(pad),
                                    transforms.ToTensor(),
Example #33
    # netSubpixel = [Subpixel(intLevel) for intLevel in [2, 3, 4, 5, 6]]

    # print()
    # for s in netSubpixel:
    #     for k, v in s.state_dict().items():
    #         print(k + ': ' + str(v.shape))
    #     print()

    # netRegularization = [Regularization(intLevel) for intLevel in [2, 3, 4, 5, 6]]
    # print()
    # for r in netRegularization:
    #     for k, v in r.state_dict().items():
    #         print(k + ": " + str(v.shape))
    #     print()

    # print("----------------------------------------------------------")
    # flownet = Network()
    # for k, v in flownet.state_dict().items():
    #     print(k + ": " + str(v.shape))

    with dg.guard():
        flownet = Network()
        flownet.eval()
        tenFirst = dg.to_variable(
            np.zeros((1, 3, 1024, 1024)).astype("float32"))
        tenSecond = dg.to_variable(
            np.zeros((1, 3, 1024, 1024)).astype("float32"))
        out = flownet(tenFirst, tenSecond)
        print(out.shape)
Example #34
# Download model and its corresponding meta data from URL
# model_path, results_path = download_model(model_url)

# Read CSV
# df = pd.read_csv(results_path)

# First row in CSV, which contains different parameters
# row = df.iloc[0]

# In[4]:

#%% Network

# Initialize network
model = Network().construct(net, row)
model = model.eval()

# Load trained model
state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
model.load_state_dict(state_dict, strict=False)
model = model.to(device)

gpus = torch.cuda.device_count()
if gpus > 1:
    print("Let's use", gpus, "GPUs!")
    model = nn.DataParallel(model, device_ids=range(gpus))

# In[5]:

#%% Dataset
Example #35
 def get(self):
     networks = list(Network.objects())
     if not networks:
         abort(404, "networks not found")
     return networks
Example #36
    time_limit = args.time_limit
    train_model_name = args.train_model
    restore_path = args.restore_path
    load_version = args.load_version
    save_period = args.save_period
    env = LunarLander()

    max_reward = -100000

    if load_version != 0:
        restore_path = "res/hybriteModel/{}/LunarLander-v2.ckpt".format(
            load_version)

    model = Network(x_shape=env.observation_space.shape[0],
                    y_shape=env.action_space.n,
                    learning_rate=0.0002,
                    gamma=0.99,
                    restore_path=restore_path)

    replBuffer = ReplayBuffer()
    suc_count = 0

    for epoch in range(current_epoch, epochs_count):

        state = env.reset()
        episode_reward = 0
        epoche_observations = []
        epoche_actions = []
        epoche_rewards = []
        time_begin = time.perf_counter()
Example #37
def get_batch(in_data, batch_size):
    batch_imgs = np.empty([batch_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS], np.float32)
    batch_data = np.empty([batch_size, N_IN_DATA], np.float32)
    batch_desi = np.empty([batch_size, N_OUT_DATA], np.float32)
    for i in range(batch_size):
        x = randint(1, len(in_data)) - 1
        batch_imgs[i], batch_data[i], batch_desi[i] = in_data[x].load_data()
    return batch_imgs, batch_data, batch_desi

#---------------------------------------------#
# Running the Network
#---------------------------------------------#

# Load the network and the data
data = ld.load_data()
nn = Network()
if LOAD_NETWORK:
    nn.load_network(LOAD_LOCATION)

# Main loop
for i in tqdm(range(4000, 10000000)):
    # Generate the batch and train
    img_batch, data_batch, desired_batch = get_batch(data, BATCH_SIZE)
    loss = nn.train(img_batch, data_batch, desired_batch, USE_WEIGHTED_LOSS)

    # Print the loss
    if i % 20 == 0:
        print(i, loss, CHECKPOINT_END)

    # Save the network
    if SAVE_NETWORK and (i + 1) % 1000 == 0:
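        # (Hedged completion: the original snippet is truncated here. The call below
        # assumes a save_network() method and SAVE_LOCATION constant that mirror the
        # load_network()/LOAD_LOCATION pair used above; neither is shown in the
        # original code.)
        nn.save_network(SAVE_LOCATION)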
Example #38
 def test_network_delete(self):
     """Deleting existing network"""
     network = Network(name='Network', description='Description', user=self.user)
     network.save()
     response = self.client.post(reverse('network_delete', args=[network.pk]))
     self.assertEqual(response.status_code, 302)
Example #39
    dir = "./res"

    render = True
    env = LunarLander()
    frames = []

    args = parse_args()
    print(args)
    is_heuristic = args.heuristic
    weight_path = args.weight
    output_path = args.output

    if not is_heuristic:
        model = Network(x_shape=env.observation_space.shape[0],
                        y_shape=env.action_space.n,
                        learning_rate=0.02,
                        gamma=0.99,
                        restore_path=weight_path)

    for i in range(1, 10):
        total_reward = 0
        steps = 0
        s = env.reset()
        epoche_rewards = []
        start = time.perf_counter()
        print("iteration: ", i)

        while True:
            env.render()
            frames.append(Image.fromarray(env.render(mode='rgb_array')))
Example #40
class trpo_agent:
    def __init__(self, env, args):
        self.env = env
        self.args = args

        # define the network
        self.net = Network(self.env.observation_space.shape[0],
                           self.env.action_space.shape[0])
        self.old_net = Network(self.env.observation_space.shape[0],
                               self.env.action_space.shape[0])

        # make sure the net and old net have the same parameters
        self.old_net.load_state_dict(self.net.state_dict())

        # define the optimizer
        self.optimizer = torch.optim.Adam(self.net.critic.parameters(),
                                          lr=self.args.lr)

        # define the running mean filter
        self.running_state = ZFilter((self.env.observation_space.shape[0], ),
                                     clip=5)

        if not os.path.exists(self.args.save_dir):
            os.mkdir(self.args.save_dir)
        self.model_path = self.args.save_dir + self.args.env_name
        if not os.path.exists(self.model_path):
            os.mkdir(self.model_path)

        self.start_episode = 0

    def learn(self):

        # configuration
        USER_SAVE_DATE = '3006'
        USER_SAVE_MODEL = 'mymodel.pt'
        CONTINUE_TRAINING = False  # False for new training, True for improving the existing model
        num_of_iteration = 0

        # paths
        date = USER_SAVE_DATE
        plot_path = self.model_path + '/' + date + '/plots/plot_'
        best_model_path = self.model_path + '/' + date + '/best/'
        all_model_path = self.model_path + '/' + date
        reward_path = self.model_path + '/' + date + '/rewards/'

        load_model = CONTINUE_TRAINING
        best_model = all_model_path + '/' + USER_SAVE_MODEL
        all_final_rewards = []

        num_updates = 1000000
        obs = self.running_state(self.env.reset())

        final_reward = 0
        episode_reward = 0
        self.dones = False

        # Load the best model for continuing training
        if load_model:
            print("=> Loading checkpoint...")
            checkpoint = torch.load(best_model)
            self.start_episode = checkpoint['update']
            self.net.load_state_dict(checkpoint['state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.running_state = checkpoint['running_state']
            final_reward = checkpoint['reward']
            all_final_rewards.append(final_reward)
            #print("=> loaded checkpoint (Episode: {}, reward: {})".format(checkpoint['update'], final_reward))

        for update in range(self.start_episode, num_updates):
            mb_obs, mb_rewards, mb_actions, mb_dones, mb_values = [], [], [], [], []
            for step in range(self.args.nsteps):
                with torch.no_grad():
                    obs_tensor = self._get_tensors(obs)
                    value, pi = self.net(obs_tensor)
                # select actions
                actions = select_actions(pi)
                # store information
                mb_obs.append(np.copy(obs))
                mb_actions.append(actions)
                mb_dones.append(self.dones)
                mb_values.append(value.detach().numpy().squeeze())
                # start to execute actions in the environment
                obs_, reward, done, _ = self.env.step(actions)
                self.dones = done
                mb_rewards.append(reward)
                if done:
                    obs_ = self.env.reset()
                obs = self.running_state(obs_)
                episode_reward += reward
                mask = 0.0 if done else 1.0
                final_reward *= mask
                final_reward += (1 - mask) * episode_reward
                episode_reward *= mask
            # to process the rollouts
            mb_obs = np.asarray(mb_obs, dtype=np.float32)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
            mb_actions = np.asarray(mb_actions, dtype=np.float32)
            mb_dones = np.asarray(mb_dones, dtype=bool)
            mb_values = np.asarray(mb_values, dtype=np.float32)
            # compute the last state value
            with torch.no_grad():
                obs_tensor = self._get_tensors(obs)
                last_value, _ = self.net(obs_tensor)
                last_value = last_value.detach().numpy().squeeze()
            # compute the advantages
            mb_returns = np.zeros_like(mb_rewards)
            mb_advs = np.zeros_like(mb_rewards)
            lastgaelam = 0
            for t in reversed(range(self.args.nsteps)):
                if t == self.args.nsteps - 1:
                    nextnonterminal = 1.0 - self.dones
                    nextvalues = last_value
                else:
                    nextnonterminal = 1.0 - mb_dones[t + 1]
                    nextvalues = mb_values[t + 1]
                delta = (mb_rewards[t]
                         + self.args.gamma * nextvalues * nextnonterminal
                         - mb_values[t])
                mb_advs[t] = lastgaelam = (
                    delta + self.args.gamma * self.args.tau * nextnonterminal * lastgaelam)
            mb_returns = mb_advs + mb_values
            # normalize the advantages
            mb_advs = (mb_advs - mb_advs.mean()) / (mb_advs.std() + 1e-5)
            # before the update, make sure the old network has the parameters of the current network
            self.old_net.load_state_dict(self.net.state_dict())
            # start to update the network
            policy_loss, value_loss = self._update_network(
                mb_obs, mb_actions, mb_returns, mb_advs)
            #torch.save([self.net.state_dict(), self.running_state], self.model_path + 'model.pt')

            print('Episode: {} / {}, Iteration: {}, Reward: {:.3f}'.format(
                update, num_updates, (update + 1) * self.args.nsteps,
                final_reward))

            all_final_rewards.append(final_reward.item())
            self.save_model_for_training(update,
                                         final_reward.item(),
                                         filepath=best_model_path +
                                         str(round(final_reward.item(), 2)) +
                                         '_' + str(update) + '.pt')

            torch.save([self.net.state_dict(), self.running_state],
                       self.model_path + "/" + date + "/" +
                       str(round(final_reward.item(), 2)) + str(update) +
                       '_testing' + ".pt")

            if update % self.args.display_interval == 0:
                fig = plt.figure()
                ax = fig.add_subplot(111)
                plt.plot(np.arange(len(all_final_rewards)), all_final_rewards)
                plt.ylabel('Reward')
                plt.xlabel('Episode #')
                plt.savefig(plot_path + str(update) + '.png')
                plt.plot()
                reward_df = pd.DataFrame(all_final_rewards)
                with open(reward_path + 'rewards.csv', 'a') as f:
                    reward_df.to_csv(f, header=False)

    def save_model_for_training(self, num_of_iteration, reward, filepath):
        checkpoint = {
            'update': num_of_iteration,
            'state_dict': self.net.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'running_state': self.running_state,
            'reward': reward
        }
        torch.save(checkpoint, filepath)

    # start to update network
    def _update_network(self, mb_obs, mb_actions, mb_returns, mb_advs):
        mb_obs_tensor = torch.tensor(mb_obs, dtype=torch.float32)
        mb_actions_tensor = torch.tensor(mb_actions, dtype=torch.float32)
        mb_returns_tensor = torch.tensor(mb_returns,
                                         dtype=torch.float32).unsqueeze(1)
        mb_advs_tensor = torch.tensor(mb_advs,
                                      dtype=torch.float32).unsqueeze(1)
        # try to get the old policy and current policy
        values, _ = self.net(mb_obs_tensor)
        with torch.no_grad():
            _, pi_old = self.old_net(mb_obs_tensor)
        # get the surr loss
        surr_loss = self._get_surrogate_loss(mb_obs_tensor, mb_advs_tensor,
                                             mb_actions_tensor, pi_old)
        # compute the surrogate gradient -> g, Ax = g, where A is the Fisher information matrix
        surr_grad = torch.autograd.grad(surr_loss, self.net.actor.parameters())
        flat_surr_grad = torch.cat([grad.view(-1) for grad in surr_grad]).data
        # use the conjugated gradient to calculate the scaled direction vector (natural gradient)
        nature_grad = conjugated_gradient(self._fisher_vector_product,
                                          -flat_surr_grad, 10, mb_obs_tensor,
                                          pi_old)
        # calculate the scaling ratio
        non_scale_kl = 0.5 * (nature_grad * self._fisher_vector_product(
            nature_grad, mb_obs_tensor, pi_old)).sum(0, keepdim=True)
        scale_ratio = torch.sqrt(non_scale_kl / self.args.max_kl)
        final_nature_grad = nature_grad / scale_ratio[0]
        # calculate the expected improvement rate...
        expected_improve = (-flat_surr_grad * nature_grad).sum(
            0, keepdim=True) / scale_ratio[0]
        # get the flat param ...
        prev_params = torch.cat(
            [param.data.view(-1) for param in self.net.actor.parameters()])
        # start to do the line search
        success, new_params = line_search(self.net.actor, self._get_surrogate_loss, prev_params, final_nature_grad, \
                                expected_improve, mb_obs_tensor, mb_advs_tensor, mb_actions_tensor, pi_old)
        set_flat_params_to(self.net.actor, new_params)
        # then trying to update the critic network
        inds = np.arange(mb_obs.shape[0])
        for _ in range(self.args.vf_itrs):
            np.random.shuffle(inds)
            for start in range(0, mb_obs.shape[0], self.args.batch_size):
                end = start + self.args.batch_size
                mbinds = inds[start:end]
                mini_obs = mb_obs[mbinds]
                mini_returns = mb_returns[mbinds]
                # put things in the tensor
                mini_obs = torch.tensor(mini_obs, dtype=torch.float32)
                mini_returns = torch.tensor(mini_returns,
                                            dtype=torch.float32).unsqueeze(1)
                values, _ = self.net(mini_obs)
                v_loss = (mini_returns - values).pow(2).mean()
                self.optimizer.zero_grad()
                v_loss.backward()
                self.optimizer.step()
        return surr_loss.item(), v_loss.item()

    # get the surrogate loss
    def _get_surrogate_loss(self, obs, adv, actions, pi_old):
        _, pi = self.net(obs)
        log_prob = eval_actions(pi, actions)
        old_log_prob = eval_actions(pi_old, actions).detach()
        surr_loss = -torch.exp(log_prob - old_log_prob) * adv
        return surr_loss.mean()

    # the product of the Fisher information matrix and the natural gradient -> Ax
    def _fisher_vector_product(self, v, obs, pi_old):
        kl = self._get_kl(obs, pi_old)
        kl = kl.mean()
        # start to calculate the second order gradient of the KL
        kl_grads = torch.autograd.grad(kl,
                                       self.net.actor.parameters(),
                                       create_graph=True)
        flat_kl_grads = torch.cat([grad.view(-1) for grad in kl_grads])
        kl_v = (flat_kl_grads * torch.autograd.Variable(v)).sum()
        kl_second_grads = torch.autograd.grad(kl_v,
                                              self.net.actor.parameters())
        flat_kl_second_grads = torch.cat(
            [grad.contiguous().view(-1) for grad in kl_second_grads]).data
        flat_kl_second_grads = flat_kl_second_grads + self.args.damping * v
        return flat_kl_second_grads

    # get the kl divergence between two distributions
    def _get_kl(self, obs, pi_old):
        mean_old, std_old = pi_old
        _, pi = self.net(obs)
        mean, std = pi
        # start to calculate the kl-divergence
        kl = -torch.log(std / std_old) + (
            std.pow(2) + (mean - mean_old).pow(2)) / (2 * std_old.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)

    # get the tensors
    def _get_tensors(self, obs):
        return torch.tensor(obs, dtype=torch.float32).unsqueeze(0)
Example #41
        im_size = 96
        epc_seed = 0
        row = Config(input_ch=input_ch,
                     padded_im_size=padded_im_size,
                     num_classes=num_classes,
                     im_size=im_size,
                     epc_seed=epc_seed)
    else:
        raise Exception('this was expected to be an unreachable line')

    if args.model in [
            'VGG11', 'VGG11_bn', 'VGG13', 'VGG13_bn', 'VGG16', 'VGG16_bn',
            'VGG19', 'VGG19_bn', 'ResNet18', 'DenseNet3_40', 'MobileNet',
            'LeNet'
    ]:
        model = Network().construct(args.model, row)
    else:
        raise Exception('Unknown model argument: {}'.format(args.model))

    # state_dict = torch.load(model_weights_path, map_location=lambda storage, loc: storage)
    # if 'model' in state_dict.keys():
    #     state_dict = state_dict['model']
    # model.load_state_dict(state_dict, strict=True)
    # model = model.to(device)

    # model = model.eval()

    mean, std = get_mean_std(args.dataset)
    pad = int((row.padded_im_size - row.im_size) / 2)
    transform = transforms.Compose([
        transforms.Pad(pad),
Example #42
class UserAccessTest(TestCase):
    """Tests for user access and sharing objects"""
    
    def setUp(self):
        self.client = Client()
        
        self.user = User.objects.create_user('user', '*****@*****.**', 'userpassword')
        self.client.login(username='******', password='******')
        
        self.other_user = User.objects.create_user('other', '*****@*****.**', 'otherpassword')
        
        self.host = Host(name="Host", description="Description",
                         ipv4='1.2.3.4', ipv6='2002:0:0:0:0:0:c22:384e',
                         user=self.user)
        self.host.save()
        
        self.net = Network(name="Net", description="Description", user=self.user)
        self.net.save()
        
    def test_user_host_access(self):
        access = user_has_access(self.host, self.user)
        self.assertEqual(access, True)
        
        access = user_has_access(self.host, self.other_user)
        self.assertEqual(access, False)
        
    def test_user_host_share(self):
        grant_access(self.host, self.other_user)
        access = user_has_access(self.host, self.other_user)
        self.assertEqual(access, True)
        
        access = user_can_edit(self.host, self.other_user)
        self.assertEqual(access, True)
        
        revoke_edit(self.host, self.other_user)
        access = user_can_edit(self.host, self.other_user)
        self.assertEqual(access, False)
        
        revoke_access(self.host, self.other_user)
        access = user_has_access(self.host, self.other_user)
        self.assertEqual(access, False)
        
    def test_user_network_access(self):
        access = user_has_access(self.net, self.user)
        self.assertEqual(access, True)
        
        access = user_has_access(self.net, self.other_user)
        self.assertEqual(access, False)
        
    def test_user_network_share(self):
        grant_access(self.net, self.other_user)
        access = user_has_access(self.net, self.other_user)
        self.assertEqual(access, True)
        
        access = user_can_edit(self.net, self.other_user)
        self.assertEqual(access, True)
        
        revoke_edit(self.net, self.other_user)
        access = user_can_edit(self.net, self.other_user)
        self.assertEqual(access, False)
        
        revoke_access(self.net, self.other_user)
        access = user_has_access(self.net, self.other_user)
        self.assertEqual(access, False)