def get_network_info() -> Response:
    """
    Returns data about the target network
    """

    token = request.headers.get('Token')
    session = Session.find(token)
    if session is None:
        return make_response({
            "error": "token does not exists"
        })

    network_id = request.form.get("id")
    response = {}

    if network_id:
        network = Network.get_by_id(network_id)
        network_list = [network] if network else []
    else:
        network_list = Network.get_by_owner(session.owner)  # TODO check if .all() returns list, else fix to list

    if not network_list:
        return make_response({
            "error": "no networks found"
        })

    for network in network_list:
        if session.owner == network.owner:
            response.update({network.id: network.as_private_simple_dict()})
        else:
            response.update({network.id: network.as_public_simple_dict()})

    return make_response(response)
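
A hypothetical client call for the endpoint above; the route path and host are assumptions, while the 'Token' header and the optional 'id' form field come from the handler itself:

import requests

resp = requests.post(
    "http://localhost:5000/network/info",   # assumed route
    headers={"Token": "my-session-token"},
    data={"id": "42"},  # omit to list all networks owned by the session
)
print(resp.json())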
Example #2
    def parse(filename):
        net = Network()
        sbml = libsbml.SBMLReader()

        parsed = sbml.readSBML(filename)
        model = parsed.getModel()

        # Evaluate and store global parameters in a symbol table
        symbols = SbmlParser._get_symbols(model)
        if not symbols:
            return False
        net.symbols = symbols

        # Initialise species and their initial amounts
        net.species = SbmlParser._get_species(model)

        time_multipler = SbmlParser._get_time_multipler(model)

        # Parse reactions and create CustomReaction objects
        net.reactions = SbmlParser._get_reactions(model, time_multipler, net)

        if not net.reactions:
            return False

        return net
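
A minimal usage sketch, assuming parse is exposed as a static method on SbmlParser; the file path is illustrative. Note that parse returns False rather than raising when symbols or reactions are missing:

net = SbmlParser.parse("model.sbml")  # hypothetical path
if net is False:
    raise ValueError("SBML model missing symbols or reactions")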
Example #3
 def get_channel_power(self, distance):
     """ Calculates channel power that depends on distance"""
     if self.type == UserType.PRIMARY:
         return 4.7e13 / (
             (4 * math.pi * Network.get_channel_frequency(self.channel_id) *
              distance)**2)
     elif self.type == UserType.SECONDARY:
         return 2.6e13 / (
             (4 * math.pi * Network.get_channel_frequency(self.channel_id) *
              distance)**2)
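
The expression has the shape of a free-space path-loss model, with received power falling off as 1/(4*pi*f*d)^2. A quick sanity check, assuming get_channel_frequency returns hertz and a hypothetical 2.4 GHz channel:

import math

# Primary user, assumed f = 2.4 GHz, d = 10 m:
power = 4.7e13 / ((4 * math.pi * 2.4e9 * 10) ** 2)
print(power)  # ~5.2e-10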
Example #4
    def __init__(self, teacher_path):
        super().__init__()

        teacher_yaml = teacher_path.parent / 'config.yaml'
        teacher_args = load_yaml(teacher_yaml)['model_args']
        teacher = Network(**teacher_args)
        teacher.load_state_dict(torch.load(str(teacher_path)))

        self.teacher = teacher
        self.converter = ConverterTorch()
Example #5
 def __init__(self, img, truth, is_training, batch_norm_decay=0.997):
     self.img = img
     self.truth = truth
     self.is_training = is_training
     self.batch_norm_decay = batcn_norm_decay
     self.img_shape = tf.shape(self.img)
     backbone = Network()
     if is_training:
         self.head, self.l2_loss = backbone.inference(self.is_training, self.img)
     else:
         self.head = backbone.inference(self.is_training, self.img)
Example #6
 def __init__(self, img, age_labels, age_vector, is_training, batch_norm_decay=0.997):
     self.img = img
     self.age_labels = age_labels
     self.age_vector = age_vector
     self.is_training = is_training
     self.batch_norm_decay = batcn_norm_decay
     self.img_shape = tf.shape(self.img)
     backbone = Network()
     if is_training:
         self.feats, self.pred, self.l1_loss = backbone.inference(self.is_training, self.img)
     else:
         self.feats, self.pred = backbone.inference(self.is_training, self.img)
Example #7
def main(config):
    teacher = Teacher(config['teacher_path']).to(config['device'])

    net = Network(**config['model_args']).to(config['device'])
    data_train, data_val = get_dataset(config['source'])(**config['data_args'])

    optim = torch.optim.Adam(net.parameters(), **config['optimizer_args'])
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optim,
        milestones=[mult * config['max_epoch'] for mult in [0.5, 0.75]],
        gamma=0.5)

    wandb.init(project='task-distillation-07',
               config=config,
               id=config['run_name'],
               resume='auto')
    wandb.save(str(Path(wandb.run.dir) / '*.t7'))

    if wandb.run.resumed:
        resume_project(net, optim, scheduler, config)
    else:
        wandb.run.summary['step'] = 0
        wandb.run.summary['epoch'] = 0
        wandb.run.summary['best_epoch'] = 0

    resume_epoch = max(wandb.run.summary['epoch'],
                       wandb.run.summary['best_epoch'])

    for epoch in tqdm.tqdm(range(resume_epoch + 1, config['max_epoch'] + 1),
                           desc='epoch',
                           position=0):
        wandb.run.summary['epoch'] = epoch

        checkpoint_project(net, optim, scheduler, config)

        loss_train = train_or_eval(teacher, net, data_train, optim, True,
                                   config)
        print(loss_train)

        with torch.no_grad():
            loss_val = train_or_eval(teacher, net, data_val, None, False,
                                     config)
        wandb.log({'train/loss_epoch': loss_train, 'val/loss_epoch': loss_val})

        if loss_val < wandb.run.summary.get('best_val_loss', np.inf):
            wandb.run.summary['best_val_loss'] = loss_val
            wandb.run.summary['best_epoch'] = epoch
            torch.save(net.state_dict(), Path(wandb.run.dir) / 'model_best.t7')

        if epoch % 10 == 0:
            torch.save(net.state_dict(),
                       Path(wandb.run.dir) / ('model_%03d.t7' % epoch))
Example #8
 def __init__(self, config: MuZeroConfig, storage: SharedStorage,
              replay_buffer: ReplayBuffer):
     self.config = config
     self.storage = storage
     self.replay_buffer = replay_buffer
     self.summary = create_summary(name="learner")
     self.metrics_loss = Mean('learner-loss', dtype=tf.float32)
     self.network = Network(self.config)
     self.lr_schedule = ExponentialDecay(
         initial_learning_rate=self.config.lr_init,
         decay_steps=self.config.lr_decay_steps,
         decay_rate=self.config.lr_decay_rate)
     self.optimizer = Adam(learning_rate=self.lr_schedule)
Example #9
    def __init__(self, path_to_conf_file):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.transform = torchvision.transforms.ToTensor()
        self.converter = ConverterTorch().to(self.device)

        self.target_index = 65
        self.speed_mult = 2.5

        path_to_conf_file = Path(path_to_conf_file)
        config = load_yaml(path_to_conf_file.parent / 'config.yaml')

        self.net = Network(**config['model_args']).to(self.device)
        self.net.load_state_dict(torch.load(path_to_conf_file))
        self.net.eval()
Example #10
def delete_network() -> Response:
    """
    Delete a network by id
    """

    token = request.headers.get('Token')
    session = Session.find(token)
    if session is None:
        return make_response({
            "error": "token does not exists"
        })

    network_id = request.form.get("id")
    if not network_id:
        return make_response({
            "error": "id missing"
        })

    network = Network.get_by_id(network_id)
    if network is None:
        return make_response({
            "error": "network not found"
        })

    if session.owner != network.owner:
        return make_response({
            "error": "permission denied"
        })

    network.delete()

    return make_response({
        # TODO response needed? Or is missing response allowed?
    })
Example #11
File: __init__.py Project: ipsec/muzero
class SharedStorage(object):

    def __init__(self, config: MuZeroConfig):
        self.config = config
        self.network = Network(self.config)
        self._started = False

    def get_network_weights(self):
        return self.network.get_weights()

    def update_network(self, weights):
        self.network.set_weights(weights)
        if not self._started:
            self._started = True

    def started(self):
        return self._started
Example #12
 def post(self):
     data = request.json
     network = Network(**data)
     db.session.add(network)
     db.session.commit()
     response = jsonify({})
     response.status_code = 201
     return response
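
A hypothetical client call; the URL and field names are assumptions, while the handler above simply unpacks the JSON body into Network(**data) and answers 201 on success:

import requests

resp = requests.post(
    "http://localhost:5000/networks",            # assumed route
    json={"name": "lab-net", "owner": "alice"},  # assumed Network columns
)
assert resp.status_code == 201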
Example #13
File: actor.py Project: ipsec/muzero
 def __init__(self,
              config: MuZeroConfig,
              storage: SharedStorage,
              replay_buffer: ReplayBuffer,
              temperature: float = 1.0):
     self.config = config
     self.network = Network(self.config)
     self.storage = storage
     self.replay_buffer = replay_buffer
     self.temperature = temperature
     self.name = f"games-{temperature}"
     self.summary = create_summary(name=self.name)
     self.games_played = 0
     self.metrics_games = Sum(self.name, dtype=tf.int32)
     self.metrics_temperature = Sum(self.name, dtype=tf.float32)
     self.metrics_rewards = Mean(self.name, dtype=tf.float32)
     self.started = False
Example #14
    def __init__(
        self,
        env: UnityEnvironment,
        memory_size: int,
        batch_size: int,
        target_update: int,
        epsilon_decay: float = 1 / 2000,
        max_epsilon: float = 1.0,
        min_epsilon: float = 0.1,
        gamma: float = 0.99,
        ):
        self.brain_name = env.brain_names[0]
        self.brain = env.brains[self.brain_name]
        env_info = env.reset(train_mode=True)[self.brain_name]
        self.env = env
        action_size = self.brain.vector_action_space_size
        state = env_info.vector_observations[0]
        state_size = len(state)
        
        self.obs_dim = state_size
        self.action_dim = 1

        self.memory = ReplayBuffer(self.obs_dim, self.action_dim, memory_size, batch_size)


        self.batch_size = batch_size
        self.target_update = target_update
        self.epsilon_decay = epsilon_decay
        self.max_epsilon = max_epsilon
        self.min_epsilon = min_epsilon
        self.gamma = gamma
        self.epsilon = max_epsilon

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        
        self.dqn = Network(self.obs_dim, self.action_dim)
        self.dqn_target = Network(self.obs_dim, self.action_dim)
        self.dqn_target.load_state_dict(self.dqn.state_dict())
        self.dqn_target.eval()

        self.optimizer = optim.Adam(self.dqn.parameters(), lr=5e-5)

        self.transition = list()

        self.is_test = False
Example #15
    def __init__(self, name, args):

        self.input_filename = args['input']
        self.output_filename = args['output']
        if 'init' in args:
            self.init_filename = args['init']
        else:
            self.init_filename = None

        with open(args['input']) as f:
            json_data = f.read()
        input = json.loads(json_data)

        # Merge Initial Condition
        input = merge_initial_condition(args, input)

        daeSimulation.__init__(self)
        self.m = Network(name, Parent=None, Description="", data=input)
Example #16
    def _networks(cls) -> List[Network]:
        addresses = psutil.net_if_addrs()
        counters = psutil.net_io_counters(pernic=True)

        return [
            Network(
                name=nic,
                address=cls._network_address(nic, addresses),
                received_bytes=stats.bytes_recv,
                sent_bytes=stats.bytes_sent,
            ) for nic, stats in counters.items()
        ]
Example #17
def main(
    epochs: int,
    batch_size: int,
    learning_rate: float,
    l1_coef: float,
    l2_coef: float,
) -> None:

    net = Network(
        Sigmoid(784, 800),
        SoftmaxCrossEntropy(800, 10),
    )

    mnist = MNIST()
    for n in range(epochs):

        for images, labels in mnist.train_set.minibatches(batch_size):
            net.train_step(
                data=images,
                labels=labels,
                learning_rate=learning_rate,
                l1_coef=l1_coef,
                l2_coef=l2_coef,
            )

        preds = net.forward(mnist.train_set.images)
        epoch_acc = np.mean(
            preds.argmax(1) == mnist.train_set.labels.argmax(1))
        print(f'Epoch {n + 1} / {epochs} train accuracy: {epoch_acc.round(2)}')

    preds = net.forward(mnist.test_set.images)
    test_acc = np.mean(preds.argmax(1) == mnist.test_set.labels.argmax(1))
    print(f'Test accuracy: {test_acc.round(2)}')
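
A hypothetical invocation; the hyperparameters are illustrative only:

if __name__ == '__main__':
    main(epochs=10, batch_size=32, learning_rate=0.01,
         l1_coef=0.0, l2_coef=1e-4)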
Example #18
def main(config):
    # from thop import profile
    # net = Network(**config['model_args'])
    # input = torch.randn(1, 6, 160, 384)  # model input shape, batch_size=1
    # flops, params = profile(net, inputs=(input,))
    # print(flops / 1e9, 'G', params / 1e6)  # FLOPs in gigaflops, params in millions
    # aa


    data_train, data_val = get_dataset(config['source'])(**config['data_args'])
    net = Network(**config['model_args']).to(config['device'])
    net.load_state_dict(torch.load(str(config['model_path'])))

    # wandb.init(
    #     project='task-distillation-eval',
    #     config=config, id=config['run_name'], resume='auto')
    # wandb.save(str(Path(wandb.run.dir) / '*.t7'))

    with torch.no_grad():
        MAE_val, RMSE_val, EVS_val = net_eval(net, data_val, config)

    print(' MAE_val', MAE_val)
    print(' RMSE_val: ', RMSE_val)
    print(' EVS_val: ', EVS_val)
Example #19
def fetch_networks(client, network_name, network_subnet):

    # Make the API call to get all objects of type 'network'
    # matching the ip address
    response = client.api_call("show-objects", {
        "filter": network_subnet,
        "ip-only": True,
        "type": "network"
    })

    # Checks if request was a success
    response_logger(response,
                    Const.MESSAGE_OBJECT_PROCESSING.format(network_name))

    # Return network list
    return Network.getInstances(response)
Example #20
def create_network(client, network_name, network_subnet, subnet_mask):

    # Fetch all networks matching with subnet
    networks = fetch_networks(client, network_name, network_subnet)

    # Check if returned networks is not none
    if networks is not None:

        # Filter the list for entries matching both subnet and subnet mask,
        # then call next() on the iterator
        # If no network is found, this returns None
        network = next(
            filter(
                lambda net: net.subnet == network_subnet and net.subnet_mask ==
                subnet_mask,
                networks,
            ),
            None,
        )

        # We check if filtered network is None
        if network is not None:
            # Then the network already exists
            # Displays a message for the user
            display(Const.MESSAGE_NETWORK_ALREADY_PRESENT.format(network.name))
            # Return the network
            return network

    # If code reaches this line, it means
    # No networks were found in the filters
    # We then create the network
    response = client.api_call(
        "add-network",
        {
            "name": network_name,
            "subnet": network_subnet,
            "subnet-mask": subnet_mask,
        },
    )

    response_logger(response,
                    Const.MESSAGE_OBJECT_CREATED.format(network_name))

    # We then return the new network object
    return Network.getInstance(response)
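
A hypothetical call, assuming an already-authenticated management API client; the name, subnet, and mask are illustrative:

net = create_network(client, "lab-net", "10.0.0.0", "255.255.255.0")
display("Using network: {}".format(net.name))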
Example #21
def mainUtil():
    network = Network([5, 1, 1], identical, dIdentical)
    inputData, outputData = readData()
    errors = []
    iterations = []
    for i in range(1000):
        iterations.append(i)
        error = []
        for j in range(len(inputData)):
            error.append(network.computeLoss(inputData[j], outputData[j])[0])
            network.backwardPropagate(
                network.computeLoss(inputData[j], outputData[j]), 0.00000001)
        errors.append(sum([(x**0.08) / len(error) for x in error]))
        for j in range(len(inputData)):
            network.feedForward(inputData[j])

    print(str(network))
    plt.plot(iterations, errors, label='loss value vs iteration')
    plt.xlabel('Iterations')
    plt.ylabel('loss function')
    plt.legend()
    plt.show()
Example #22
File: __init__.py Project: ipsec/muzero
def run_mcts(config: MuZeroConfig, root: Node, action_history: ActionHistory,
             network: Network, min_max_stats: MinMaxStats):
    for _ in range(config.num_simulations):
        history = action_history.clone()
        node = root
        search_path = [node]

        while node.expanded():
            action, node = select_child(config, node, min_max_stats)
            history.add_action(action)
            search_path.append(node)

        # Inside the search tree we use the dynamics function to obtain the next
        # hidden state given an action and the previous hidden state.
        parent = search_path[-2]
        network_output = network.recurrent_inference(parent.hidden_state,
                                                     history.last_action())
        expand_node(node, history.to_play(), history.action_space(),
                    network_output)

        backpropagate(search_path, network_output.value, history.to_play(),
                      config.discount, min_max_stats)
Example #23
File: train.py Project: zankner/Dnc
 def __init__(self, params):
     self.lr = params.lr
     self.epochs = params.epochs
     # Define loss (placeholder choice; the original left this blank):
     self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
     # Define optimizer (placeholder choice; the original left this blank):
     self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.lr)
     # Define metrics for loss (placeholder choices; the original left these blank):
     self.train_loss = tf.keras.metrics.Mean(name='train_loss')
     self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
     self.test_loss = tf.keras.metrics.Mean(name='test_loss')
     self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
     # Define model:
     self.model = Network()
     # Define pre processor (params):
     preprocessor = Process()
     self.train_ds, self.test_ds = preprocessor.get_datasets()
     # Define Checkpoints:
     self.ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=self.optimizer,
             net=self.model)
     # Define Checkpoint manager:
     self.ckpt_manager = tf.train.CheckpointManager(self.ckpt, f'checkpoints{params.ckpt_dir}',
             max_to_keep=3)
Example #24
def add_device_to_network() -> Response:
    """
    Add a device to network
    """

    token = request.headers.get('Token')
    session = Session.find(token)
    if session is None:
        return make_response({
            "error": "token does not exists"
        })

    network_id = request.form.get("id")
    if not network_id:
        return make_response({
            "error": "id missing"
        })

    network = Network.get_by_id(network_id)
    if network is None:
        return make_response({
            "error": "network not found"
        })

    if session.owner != network.owner:
        return make_response({
            "error": "permission denied"
        })
Example #25
    def start(self):
        """ Runs simulation with given seed"""
        Simulation.semaphore.acquire()
        self.env = simpy.Environment()

        random.seed(self.seed)

        print(change_style("[Simulation #{}]".format(self.id), 'blue') + change_style(" Generating contents", "info"))
        self.contents = Content.generate()

        print(change_style("[Simulation #{}]".format(self.id), 'blue') + change_style(
            " Generating secondary users and fill up their caches",
            "info"))
        self.users = User.generate(self.env, self.contents)

        self.network = Network(self.env)
        # Create PU arrivals
        self.env.process(self.arrival_process(LAMBDA_PRIMARY_USER, UserType.PRIMARY))

        # Create SU arrivals
        self.env.process(self.arrival_process(LAMBDA_SECONDARY_USER, UserType.SECONDARY))

        print(change_style("[Simulation #{}]".format(self.id), 'blue') + change_style(" Starting", "info"))
        self.env.run(until=self.time)
        print(change_style("[Simulation #{}]".format(self.id), 'blue') + change_style(" Ending", "info"))

        self.logger.save()
        Simulation.semaphore.release()

        performance = self.calculate_performance()
        Simulation.lock.acquire()
        Simulation.performances['latency'] += performance['latency']
        Simulation.performances['p']['sq'] += performance['p']['sq']
        Simulation.performances['p']['hq']['base'] += performance['p']['hq']['base']
        Simulation.performances['p']['hq']['enh']['base_local_hit'] += performance['p']['hq']['enh']['base_local_hit']
        Simulation.performances['p']['hq']['enh']['base_d2d'] += performance['p']['hq']['enh']['base_d2d']
        Simulation.lock.release()
Example #26
 def __init__(self):
     self.network_url = Network().network_url
     self.parser = Parser()
     self.client = AsyncHTTPClient()
     self.logger = get_logger('http-bridge')
Example #27
class DQNAgent:

    def __init__(
        self,
        env: UnityEnvironment,
        memory_size: int,
        batch_size: int,
        target_update: int,
        epsilon_decay: float = 1 / 2000,
        max_epsilon: float = 1.0,
        min_epsilon: float = 0.1,
        gamma: float = 0.99,
        ):
        self.brain_name = env.brain_names[0]
        self.brain = env.brains[self.brain_name]
        env_info = env.reset(train_mode=True)[self.brain_name]
        self.env = env
        action_size = self.brain.vector_action_space_size
        state = env_info.vector_observations[0]
        state_size = len(state)
        
        self.obs_dim = state_size
        self.action_dim = 1

        self.memory = ReplayBuffer(self.obs_dim, self.action_dim, memory_size, batch_size)


        self.batch_size = batch_size
        self.target_update = target_update
        self.epsilon_decay = epsilon_decay
        self.max_epsilon = max_epsilon
        self.min_epsilon = min_epsilon
        self.gamma = gamma
        self.epsilon = max_epsilon

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        
        self.dqn = Network(self.obs_dim, self.action_dim)
        self.dqn_target = Network(self.obs_dim, self.action_dim)
        self.dqn_target.load_state_dict(self.dqn.state_dict())
        self.dqn_target.eval()

        self.optimizer = optim.Adam(self.dqn.parameters(), lr=5e-5)

        self.transition = list()

        self.is_test = False

    def select_action(self, state: np.ndarray) -> np.int64:
        """ Select an action given input """
        if self.epsilon > np.random.random():
            # np.random.random_integers was removed from NumPy; randint's upper bound is exclusive
            selected_action = np.random.randint(0, self.action_dim)
        else:
            selected_action = self.dqn(
                torch.FloatTensor(state).to(self.device)
            )
            selected_action = np.argmax(selected_action.detach().cpu().numpy())

        
        if not self.is_test:
            self.transition = [state, selected_action]
        
        return selected_action

    def step(self, action: np.int64) -> Tuple[np.ndarray, np.float64, bool]:
        "Take an action and return environment response"
        env_info = self.env.step(action)[self.brain_name]
        next_state = env_info.vector_observations[0]   
        reward = env_info.rewards[0]                   
        done = env_info.local_done[0]
    
        if not self.is_test:
            self.transition += [reward, next_state, done]
            self.memory.store(*self.transition)

        return next_state, reward, done

    def update_model(self) -> torch.Tensor:
        """ Update model by gradient descent"""
        samples = self.memory.sample_batch()
        loss = self._compute_dqn_loss(samples)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        return loss.item()

    def train(self, num_episode: int, max_iteration: int=1000, plotting_interval: int=400):
        """  train the agent """
        self.is_test = False

        env_info = self.env.reset(train_mode=True)[self.brain_name]
        state = env_info.vector_observations[0]

        update_cnt = 0
        epsilons = []
        losses = []
        avg_losses= []
        scores = []
        avg_scores = []

        for episode in range(num_episode):
            env_info = self.env.reset(train_mode=True)[self.brain_name]
            state = env_info.vector_observations[0]
            score = 0
            for iter in range(max_iteration):
                action = self.select_action(state)
                next_state, reward, done = self.step(action)
                state = next_state
                score += reward
                if done:
                    break

                if len(self.memory) > self.batch_size:
                    loss = self.update_model()
                    losses.append(loss)
                    update_cnt += 1

            avg_losses.append(np.mean(losses))
            losses = []
            self.epsilon = max(
                self.min_epsilon, self.epsilon - (
                    self.max_epsilon - self.min_epsilon
                ) * self.epsilon_decay
            )
            epsilons.append(self.epsilon)
            
            if update_cnt % self.target_update == 0:
                self._target_hard_update()
            scores.append(score)

            if episode >= 100:
                avg_scores.append(np.mean(scores[-100:]))
            self._plot(episode, scores, avg_scores, avg_losses, epsilons)
        torch.save(self.dqn.state_dict(), "model_weight/dqn.pt")



    def test(self):
        """ Test agent """
        self.is_test = True
        env_info = self.env.reset(train_mode=False)[self.brain_name]
        state = env_info.vector_observations[0]
        done = False
        score = 0

        while not done:
            action = self.select_action(state)
            next_state, reward, done = self.step(action)

            state = next_state
            score += reward

        print("score: ", score)
        self.env.close()


    def _compute_dqn_loss(self, samples: Dict[str, np.ndarray], gamma: float=0.99) -> torch.Tensor:
        """ Compute and return DQN loss"""
        gamma = self.gamma
        device = self.device
        state = torch.FloatTensor(samples["obs"]).to(device)
        next_state = torch.FloatTensor(samples["next_obs"]).to(device)
        action = torch.LongTensor(samples["acts"]).reshape(-1, 1).to(device)
        reward = torch.FloatTensor(samples["rews"]).reshape(-1, 1).to(device)
        done = torch.FloatTensor(samples["done"]).reshape(-1, 1).to(device)
        
        curr_q_value = self.dqn(state).gather(1, action)
            
        next_q_value = self.dqn_target(next_state).max(dim=1, keepdim=True)[0].detach()
        mask = 1 - done
        target = (reward + gamma * next_q_value * mask).to(device)
        loss = F.smooth_l1_loss(curr_q_value, target)

        return loss


    def _target_hard_update(self):
        """ update target network """
        self.dqn_target.load_state_dict(self.dqn.state_dict())

    def _plot(
        self,
        episode :int,
        scores: List[float],
        avg_scores: List[float],
        losses: List[float],
        epsilons: List[float]
    ):
        """ Plot the training process"""
        plt.figure(figsize=(20, 5))
        plt.subplot(141)
        if len(avg_scores) > 0:
            plt.title("Average reward per 100 episodes. Score: %s" % (avg_scores[-1]))
        else:
            plt.title("Average reward over 100 episodes.")
        plt.plot([100 + i for i in range(len(avg_scores))], avg_scores)
        plt.subplot(142)
        plt.title("episode %s. Score: %s" % (episode, np.mean(scores[-10:])))
        plt.plot(scores)
        plt.subplot(143)
        plt.title('Loss')
        plt.plot(losses)
        plt.subplot(144)
        plt.title('epsilons')
        plt.plot(epsilons)
        plt.savefig('plots/dqn_result.png')
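
A minimal driver sketch, assuming a Unity ML-Agents environment binary; the file name and hyperparameters are assumptions, not part of the original:

from unityagents import UnityEnvironment

env = UnityEnvironment(file_name="Banana.app")  # assumed environment binary
agent = DQNAgent(
    env,
    memory_size=100_000,
    batch_size=64,
    target_update=100,
)
agent.train(num_episode=500)
agent.test()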
Example #28
def create_new(num_inputs, num_outputs):
    apicache.current_network = Network(num_inputs, num_outputs)

    return NetworkChangeSuccessResponse()
Example #29
def main():
    if not torch.cuda.is_available():
        logging.info('No GPU device available')
        sys.exit(1)
    np.random.seed(args.super_seed)
    cudnn.benchmark = True
    torch.manual_seed(args.super_seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.super_seed)
    logging.info("args = %s", args)
    logging.info("unparsed args = %s", unparsed)

    # prepare dataset
    if args.cifar100:
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
    else:
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.cifar100:
        train_data = dset.CIFAR100(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR100(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)
    else:
        train_data = dset.CIFAR10(root=args.tmp_data_dir, train=True, download=True, transform=train_transform)
        valid_data = dset.CIFAR10(root=args.tmp_data_dir, train=False, download=True, transform=valid_transform)

    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=args.workers, drop_last=True)

    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=args.workers, drop_last=True)

    ood_queues = {}
    for k in ['svhn', 'lsun_resized', 'imnet_resized']:
        ood_path = os.path.join(args.ood_dir, k)
        dset_ = dset.ImageFolder(ood_path, valid_transform)
        loader = torch.utils.data.DataLoader(
            dset_, batch_size=args.batch_size, shuffle=False,
            pin_memory=True, num_workers=args.workers
        )
        ood_queues[k] = loader

    # build Network
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    supernet = Network(
        args.init_channels, CIFAR_CLASSES, args.layers,
        combine_method=args.feat_comb, is_cosine=args.is_cosine,
    )
    supernet.cuda()
    supernet.generate_share_alphas()  # prevents the supernet alpha attribute from being None

    alphas_path = './results/{}/eval_out/{}/alphas.pt'.format(args.load_at.split('/')[2], args.folder)
    logging.info('Loading alphas at: %s' % alphas_path)
    alphas = torch.load(alphas_path)

    subnet = supernet.get_sub_net(alphas[:, :-1])
    logging.info(alphas)

    if args.cifar100:
        weight_decay = 5e-4
    else:
        weight_decay = 3e-4
    optimizer = torch.optim.SGD(
        subnet.parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=weight_decay,
    )
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs), eta_min=args.learning_rate_min)

    for epoch in range(args.epochs):
        logging.info('epoch {} lr {:.4f}'.format(epoch, scheduler.get_last_lr()[0]))

        train_acc, _ = train(train_queue, subnet, criterion, optimizer)
        logging.info('train_acc {:.2f}'.format(train_acc))

        valid_acc, valid_loss = infer(valid_queue, subnet, criterion)
        writer_va.add_scalar('loss', valid_loss, global_step)
        writer_va.add_scalar('acc', valid_acc, global_step)
        logging.info('valid_acc {:.2f}'.format(valid_acc))
        scheduler.step()

    if not os.path.exists(args.ckpt_path):
        os.makedirs(args.ckpt_path)
    utils.save(subnet, os.path.join(args.ckpt_path, 'subnet_{}_weights.pt'.format(args.folder)))

    lg_aucs, sm_aucs, ent_aucs = ood_eval(valid_queue, ood_queues, subnet, criterion)

    logging.info('Writing results:')
    out_dir = './results/{}/eval_out/{}/'.format(args.load_at.split('/')[2], args.folder)
    with open(os.path.join(out_dir, 'subnet_scratch.txt'), 'w') as f:
        f.write('-'.join([str(valid_acc), str(lg_aucs), str(sm_aucs), str(ent_aucs)]))
Example #30
class Planner(object):
    def __init__(self, path_to_conf_file):
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.transform = torchvision.transforms.ToTensor()
        self.converter = ConverterTorch().to(self.device)

        self.target_index = 65
        self.speed_mult = 2.5

        path_to_conf_file = Path(path_to_conf_file)
        config = load_yaml(path_to_conf_file.parent / 'config.yaml')

        self.net = Network(**config['model_args']).to(self.device)
        self.net.load_state_dict(torch.load(path_to_conf_file))
        self.net.eval()

    @torch.no_grad()
    def run_step(self, rgb, rgb_forward, viz=None):
        if Modular:
            # Modularity and Abstract
            rgb = Image.fromarray(rgb).convert('RGB')
            img = input_transform_cityscapes(rgb)
            img = img.cuda().unsqueeze(0)
            rgb_forward = Image.fromarray(rgb_forward).convert('RGB')
            img_forward = input_transform_cityscapes(rgb_forward)
            img_forward = img_forward.cuda().unsqueeze(0)

            output = model(img)
            label = output[0].max(0)[1].byte().cpu().data
            label_color = Colorize()(label.unsqueeze(0))
            rgb = ToPILImage()(label_color)
            rgb.save('./seg.jpg')

            output = model(img_forward)
            label = output[0].max(0)[1].byte().cpu().data
            label_color = Colorize()(label.unsqueeze(0))
            rgb_forward = ToPILImage()(label_color)
            rgb_forward.save('./seg_2.jpg')

        img = self.transform(rgb).to(self.device).unsqueeze(0)
        img_forward = self.transform(rgb_forward).to(self.device).unsqueeze(0)

        # print(img_forward.shape)
        model_input = torch.cat((img_forward, img), 1)

        cam_coords = self.net(model_input)
        cam_coords[..., 0] = (cam_coords[..., 0] +
                              1) / 2 * img.shape[-1]  # rgb coords
        cam_coords[..., 1] = (cam_coords[..., 1] + 1) / 2 * img.shape[-2]

        map_coords = self.converter.cam_to_map(
            cam_coords).cpu().numpy().squeeze()
        world_coords = self.converter.cam_to_world(
            cam_coords).cpu().numpy().squeeze()

        target_speed = np.sqrt(
            ((world_coords[:2] - world_coords[1:3])**2).sum(1).mean())
        target_speed *= self.speed_mult

        theta1 = np.degrees(np.arctan2(world_coords[0][0], world_coords[0][1]))
        theta2 = np.degrees(np.arctan2(world_coords[4][0], world_coords[4][1]))
        # print(abs(theta2 - theta1))
        if abs(theta2 - theta1) < 2:
            target_speed *= self.speed_mult
        else:
            target_speed *= 1.2

        curve = spline(map_coords + 1e-8 * np.random.rand(*map_coords.shape),
                       100)
        target = curve[self.target_index]

        curve_world = spline(
            world_coords + 1e-8 * np.random.rand(*world_coords.shape), 100)
        target_world = curve_world[self.target_index]

        if viz:
            viz.planner_draw(cam_coords.cpu().numpy().squeeze(), map_coords,
                             curve, target)

        return target_world, target_speed