Example #1
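# Imports needed to make this snippet self-contained. The Mephisto paths below
# follow the remote-procedure blueprint layout; the `mnist` import is an
# assumption standing in for the example's local pretrained-classifier module.
import base64
from io import BytesIO
from typing import Any, Dict, List

import torch
from omegaconf import DictConfig
from PIL import Image

from mephisto.abstractions.blueprints.remote_procedure.remote_procedure_blueprint import (
    RemoteProcedureAgentState,
    SharedRemoteProcedureTaskState,
)
from mephisto.operations.operator import Operator
from mephisto.tools.scripts import build_custom_bundle

from mnist import mnist  # assumed local module exposing a pretrained MNIST model
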
def main(operator: Operator, cfg: DictConfig) -> None:
    tasks: List[Dict[str, Any]] = [{}] * cfg.num_tasks
    mnist_model = mnist(pretrained=True)

    def handle_with_model(
            _request_id: str, args: Dict[str, Any],
            agent_state: RemoteProcedureAgentState) -> Dict[str, Any]:
        """Convert the image to be read by MNIST classifier, then classify"""
        img_dat = args["urlData"].split("data:image/png;base64,")[1]
        im = Image.open(BytesIO(base64.b64decode(img_dat)))
        im_gray = im.convert("L")
        im_resized = im_gray.resize((28, 28))
        im_vals = list(im_resized.getdata())
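        # Invert and rescale pixel values to [0, 1] so the drawn digit matches
        # MNIST's white-digit-on-black convention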
        norm_vals = [(255 - x) * 1.0 / 255.0 for x in im_vals]
        in_tensor = torch.tensor([norm_vals])
        output = mnist_model(in_tensor)
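        # The index of the largest logit is the predicted digit class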
        pred = output.data.max(1)[1]
        print("Predicted digit:", pred.item())
        return {
            "digit_prediction": pred.item(),
        }

    function_registry = {
        "classify_digit": handle_with_model,
    }

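    # Bundle the static task data and the remote-procedure handlers into the shared task state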
    shared_state = SharedRemoteProcedureTaskState(
        static_task_data=tasks,
        function_registry=function_registry,
    )

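    # Build the task's frontend bundle before launching the run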
    task_dir = cfg.task_dir
    build_custom_bundle(task_dir)

    operator.launch_task_run(cfg.mephisto, shared_state)
    operator.wait_for_runs_then_shutdown(skip_input=True, log_rate=30)
Example #2
# Imports assumed for this training snippet; the argparse block that defines
# `args` is not shown in the listing, and `dataset` / `model` refer to the
# project's local MNIST data and model modules.
import time

import torch
import torch.optim as optim

import dataset
import model

print("=================FLAGS==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")

# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# data loader
train_loader, test_loader = dataset.get(batch_size=args.batch_size,
                                        data_root=args.data_root,
                                        num_workers=1)

# model
model = model.mnist(input_dims=784, n_hiddens=[256, 256], n_class=10)
model = torch.nn.DataParallel(model, device_ids=range(args.ngpu))
if args.cuda:
    model.cuda()

# optimizer
optimizer = optim.SGD(model.parameters(),
                      lr=args.lr,
                      weight_decay=args.wd,
                      momentum=0.9)
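# Epochs at which the learning rate is scheduled to decay, parsed from a comma-separated flag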
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
    # ready to go
Example #3
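# Same assumed imports as above: `args` comes from an argparse block omitted
# here, and `dataset` / `model` are the project's local MNIST modules.
import time

import torch
import torch.optim as optim

import dataset
import model
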
print("=================FLAGS==================")
for k, v in args.__dict__.items():
    print('{}: {}'.format(k, v))
print("========================================")

# seed
args.cuda = torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# data loader
train_loader, test_loader = dataset.get(batch_size=args.batch_size, data_root=args.data_root, num_workers=1)

# model
model = model.mnist(input_dims=784, n_hiddens=[512, 512, 512], n_class=10)
model = torch.nn.DataParallel(model, device_ids=range(args.ngpu))
if args.cuda:
    model.cuda()

# optimizer
optimizer = optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.wd, momentum=0.9)
decreasing_lr = list(map(int, args.decreasing_lr.split(',')))
print('decreasing_lr: ' + str(decreasing_lr))
best_acc, old_file = 0, None
t_begin = time.time()
try:
    # ready to go
    for epoch in range(args.epochs):
        model.train()
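        # Decay the learning rate at each scheduled epoch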
        if epoch in decreasing_lr: