Example #1
                        train = train[int(0.95 * len(train)):]  # keep the last 5% of this user's training split
                        test = test[int(0.8 * len(test)):]  # keep the last 20% of the test split
                    elif sub_data == 2:
                        train = train[int(0.8 * len(train)):]  # keep the last 20% of the training split
                        test = test[int(0.6 * len(test)):]  # keep the last 40% of the test split
            local = LocalUpdateMTL(args=args, data_train=train, data_test=test)
            local_list_users.append(local)

        glob_acc = []
        train_acc = []
        train_loss = []
        avg_acc = []

        for iter in range(args.num_global_iters):
            if experiment:
                experiment.set_epoch(iter + 1)
            w_glob = {}
            loss_locals = []
            m = max(int(args.subusers * args.num_users), 1)  # number of users sampled this round
            idxs_users = np.random.choice(range(args.num_users),
                                          m,
                                          replace=False)

            W = torch.zeros((d, m)).to(args.device)  # d x m: one column of flattened shared weights per sampled user

            # update W
            for idx, user in enumerate(idxs_users):
                W_local = [
                    net_local_list[user].state_dict()[key].flatten()
                    for key in w_glob_keys
                ]
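
The fragment ends right after building W_local. Below is a minimal, self-contained sketch of how such flattened per-user weights are typically concatenated and stacked as columns of the d x m matrix W; the toy models, key names, and sizes are assumptions for illustration, not the example's actual networks.

    # Self-contained sketch (hypothetical toy models and key names)
    import torch
    import torch.nn as nn

    w_glob_keys = ["weight", "bias"]                      # names of the shared layers
    net_local_list = [nn.Linear(4, 2) for _ in range(3)]  # one local model per user

    d = sum(net_local_list[0].state_dict()[k].numel() for k in w_glob_keys)
    m = len(net_local_list)
    W = torch.zeros((d, m))  # one column of flattened shared weights per user

    for idx, net in enumerate(net_local_list):
        W_local = [net.state_dict()[key].flatten() for key in w_glob_keys]
        W[:, idx] = torch.cat(W_local)  # write the d-vector into column idx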
Example #2
        "x",
        "y",
        "w",
        "h",
        "conf",
        "cls",
        "cls_acc",
        "recall50",
        "recall75",
        "precision",
        "conf_obj",
        "conf_noobj",
    ]

    for epoch in range(opt.epochs):
        experiment.set_epoch(epoch)
        model.train()
        start_time = time.time()
        for batch_i, (_, imgs, targets) in enumerate(dataloader):
            batches_done = len(dataloader) * epoch + batch_i

            imgs = imgs.to(device)
            targets = targets.to(device)  # targets are plain labels and carry no gradient by default

            loss, outputs = model(imgs, targets)
            loss.backward()  # gradients accumulate until the optimizer step below

            if (batches_done + 1) % opt.gradient_accumulations == 0:
                # Step only after gradients have accumulated over
                # `gradient_accumulations` batches, then reset them
                optimizer.step()
                optimizer.zero_grad()
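
For reference, here is a minimal, self-contained sketch of the same gradient-accumulation pattern, with the loss scaled so the accumulated gradient matches the average over the effective batch; the toy model, optimizer, and data are assumptions, not the example's YOLO setup.

    # Self-contained sketch of gradient accumulation (hypothetical model and data)
    import torch
    import torch.nn as nn

    model = nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    gradient_accumulations = 4  # step once every 4 batches

    batches = [(torch.randn(8, 10), torch.randn(8, 1)) for _ in range(12)]

    optimizer.zero_grad()
    for batch_i, (inputs, targets) in enumerate(batches):
        loss = nn.functional.mse_loss(model(inputs), targets)
        (loss / gradient_accumulations).backward()  # scale so the sum of gradients averages over the effective batch
        if (batch_i + 1) % gradient_accumulations == 0:
            optimizer.step()       # apply the accumulated gradient
            optimizer.zero_grad()  # reset for the next accumulation window

Dividing the loss by gradient_accumulations keeps each update equivalent to training on a single batch of 8 * 4 samples, which is the usual reason for accumulating gradients when the full batch does not fit in memory.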