Esempio n. 1
0
def main():
    """Evaluate a pretrained NAS architecture on the CIFAR-10 test set.

    Requires a CUDA-capable GPU and exits with status 1 otherwise.
    Builds the network named by ``args.arch`` for the selected search
    space (``micro`` or ``macro``), loads weights from ``args.model_path``
    and runs inference over the CIFAR-10 test split.
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)

    # The macro search space models have no auxiliary head, so reject the combo early.
    if args.auxiliary and args.net_type == 'macro':
        logging.info('auxiliary head classifier not supported for macro search space models')
        sys.exit(1)

    logging.info("args = %s", args)

    cudnn.enabled = True
    cudnn.benchmark = True
    # Seed numpy and torch (CPU + CUDA) for reproducible evaluation.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Data: only the validation transform is needed for inference.
    _, valid_transform = utils._data_transforms_cifar10(args)

    valid_data = torchvision.datasets.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
    valid_queue = torch.utils.data.DataLoader(
        valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=1)

    # Model: resolve the architecture by name with getattr instead of eval()
    # — same lookup, but no arbitrary-code-execution risk from the CLI string.
    if args.net_type == 'micro':
        logging.info("==> Building micro search space encoded architectures")
        genotype = getattr(genotypes, args.arch)
        net = PyrmNASNet(args.init_channels, num_classes=10, layers=args.layers,
                         auxiliary=args.auxiliary, genotype=genotype,
                         increment=args.filter_increment, SE=args.SE)
    elif args.net_type == 'macro':
        genome = getattr(macro_genotypes, args.arch)
        channels = [(3, 128), (128, 128), (128, 128)]
        net = EvoNetwork(genome, channels, 10, (32, 32), decoder='dense')
    else:
        raise NameError('Unknown network type, please only use supported network type')

    logging.info("param size = %fMB", utils.count_parameters_in_MB(net))

    net = net.to(device)
    # No drop path during inference.
    net.droprate = 0.0
    utils.load(net, args.model_path)

    criterion = nn.CrossEntropyLoss()
    criterion.to(device)

    # Inference on the original CIFAR-10 test images.
    infer(valid_queue, net, criterion)
Esempio n. 2
0
def main():
    """Launch a single A3C worker task from the command-line arguments."""
    cli = parser.parse_args()
    # Cluster layout: cli.n_tasks worker tasks plus one parameter server.
    cluster_def = tf.train.ClusterSpec(cluster_spec(cli.n_tasks, 1)).as_cluster_def()
    worker_cls = load("agents.actorcritic.a3c_worker:" + cli.cls)
    settings = json_to_dict(cli.config)
    worker = worker_cls(cli.env_id, cli.task_id, cluster_def,
                        cli.monitor_path, settings, video=cli.video)
    worker.learn()
Esempio n. 3
0
def main():
    """Entry point for a DPPO worker process spawned over MPI."""
    parent_comm = MPI.Comm.Get_parent()
    # Each spawned child uses its MPI rank as its task identifier.
    rank = parent_comm.Get_rank()
    cli = parser.parse_args()

    worker_cls = load("agents.ppo.dppo_worker:" + cli.cls)
    settings = json_to_dict(cli.config)

    worker = worker_cls(cli.env_id, rank, parent_comm,
                        cli.monitor_path, settings, cli.seed)
    worker.run()
Esempio n. 4
0
def make_agent(name: str, state_dimensions: str, action_space: str, RNN: bool = False, **kwargs):
    """Make an agent of a given name, possibly using extra arguments.

    Looks up *name* in ``agent_registry`` and selects the registered variant
    matching the requested action space, state dimensionality and RNN flag,
    then instantiates its entry point with ``**kwargs``.

    Raises:
        ClassNotRegisteredError: if no matching agent variant is registered.
    """
    try:
        variants = agent_registry[name]
        # Pick the first variant matching all three selection criteria.
        spec = next(variant for variant in variants
                    if variant["action_space"] == action_space
                    and variant["state_dimensions"] == state_dimensions
                    and variant["RNN"] == RNN)
        entry_point = spec["entry_point"]
        # The entry point may be registered as a class or as an import path string.
        if not callable(entry_point):
            entry_point = load(entry_point)
    except (KeyError, StopIteration) as err:
        # Chain the original lookup failure so the root cause stays visible.
        raise ClassNotRegisteredError(
            "The agent {} for state dimensionality {}, action space {} and RNN={} is not registered.".format(
                name,
                state_dimensions,
                action_space,
                RNN)) from err
    return entry_point(**kwargs)