示例#1
0
    print(net_glob)
    net_glob.train()

    # copy weights
    # Snapshot of the freshly-built global model's parameters; reloaded at the
    # start of every user's run so each user trains from the same init.
    w_init = copy.deepcopy(net_glob.state_dict())

    # Accumulators for per-user accuracy curves.
    # NOTE(review): the *_final lists are declared but not filled in this
    # visible span — presumably appended to below the cut; confirm.
    local_acc_final = []
    total_acc_final = []
    local_acc = np.zeros([args.num_users, args.epochs])
    total_acc = np.zeros([args.num_users, args.epochs])

    # training
    # One independent training run per user (federated-style per-client
    # training, judging from num_users/dict_users — TODO confirm).
    for idx in range(args.num_users):
        # print(w_init)
        # Reset the model to the shared initial weights before this user trains.
        net_glob.load_state_dict(w_init)
        # Fresh Adam optimizer per user (default hyperparameters).
        optimizer = optim.Adam(net_glob.parameters())
        # DataLoader over only this user's shard of the training set.
        train_loader = DataLoader(DatasetSplit(dataset_train, dict_users[idx]),
                                  batch_size=64,
                                  shuffle=True)
        # Class-frequency histogram of this user's shard, normalized to a
        # probability distribution (assumes 10 classes — TODO confirm dataset).
        image_trainset_weight = np.zeros(10)
        for label in np.array(dataset_train.targets)[dict_users[idx]]:
            image_trainset_weight[label] += 1
        image_trainset_weight = image_trainset_weight / image_trainset_weight.sum(
        )
        list_loss = []
        net_glob.train()
        # Standard mini-batch training loop for this user.
        for epoch in range(args.epochs):
            batch_loss = []
            for batch_idx, (data, target) in enumerate(train_loader):
                data, target = data.to(args.device), target.to(args.device)
                optimizer.zero_grad()
示例#2
0
    # build model
    # Select an architecture based on CLI args; LeNet-style CNN for
    # CIFAR/Fashion-MNIST, VGG16 for CIFAR only.
    if args.model == 'lenet' and (args.dataset == 'cifar' or args.dataset == 'fmnist'):
        net_glob = CNNCifar(args=args).to(args.device)
    elif args.model == 'vgg' and args.dataset == 'cifar':
        net_glob = vgg16().to(args.device)
    else:
        exit('Error: unrecognized model')
    print(net_glob)
    # Trace the model with one sample (batch dim added via unsqueeze) so the
    # architecture graph shows up in TensorBoard.
    img = dataset_train[0][0].unsqueeze(0).to(args.device)
    writer.add_graph(net_glob, img)

    # training
    # NOTE(review): "creterion" is a misspelling of "criterion"; left as-is
    # because this fragment is cut off below and the name may be referenced
    # outside the visible span — rename everywhere in one pass if fixed.
    creterion = nn.CrossEntropyLoss()
    train_loader = DataLoader(dataset_train, batch_size=64, shuffle=True)
    # optimizer = optim.Adam(net_glob.parameters())
    # SGD with momentum and weight decay (classic CIFAR recipe values).
    optimizer = optim.SGD(net_glob.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.1)
    # # # scheduler.step()

    list_loss = []
    net_glob.train()
    # Standard centralized mini-batch training loop.
    for epoch in range(args.epochs):
        batch_loss = []
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(args.device), target.to(args.device)
            optimizer.zero_grad()
            output = net_glob(data)
            loss = creterion(output, target)
            loss.backward()
            optimizer.step()
            # Periodic progress logging (body continues below this fragment).
            if batch_idx % 50 == 0:
示例#3
0
    # Select an architecture based on CLI args: CNN for CIFAR/MNIST, or an
    # MLP whose input width is the flattened image size.
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_glob = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_glob = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        # Flattened input dimension = product of image dims (C*H*W).
        len_in = 1
        for x in img_size:
            len_in *= x
        net_glob = MLP(dim_in=len_in, dim_hidden=64,
                       dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')
    print(net_glob)

    # training
    # Plain SGD with momentum taken from CLI args.
    optimizer = optim.SGD(net_glob.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    train_loader = DataLoader(dataset_train, batch_size=64, shuffle=True)

    list_loss = []
    net_glob.train()
    # Standard centralized mini-batch training loop using functional
    # cross-entropy (F.cross_entropy) as the loss.
    for epoch in range(args.epochs):
        batch_loss = []
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(args.device), target.to(args.device)
            optimizer.zero_grad()
            output = net_glob(data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            optimizer.step()
示例#4
0
    # build model
    # Select an architecture based on CLI args: CNN for CIFAR/MNIST, or an
    # MLP whose input width is the flattened image size.
    # NOTE(review): this fragment duplicates the preceding example almost
    # line-for-line — candidates for a shared build_model()/train() helper.
    if args.model == 'cnn' and args.dataset == 'cifar':
        net_glob = CNNCifar(args=args).to(args.device)
    elif args.model == 'cnn' and args.dataset == 'mnist':
        net_glob = CNNMnist(args=args).to(args.device)
    elif args.model == 'mlp':
        # Flattened input dimension = product of image dims (C*H*W).
        len_in = 1
        for x in img_size:
            len_in *= x
        net_glob = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
    else:
        exit('Error: unrecognized model')
    print(net_glob)

    # training
    # Plain SGD with momentum taken from CLI args.
    optimizer = optim.SGD(net_glob.parameters(), lr=args.lr, momentum=args.momentum)
    train_loader = DataLoader(dataset_train, batch_size=64, shuffle=True)

    list_loss = []
    net_glob.train()
    # Standard centralized mini-batch training loop.
    for epoch in range(args.epochs):
        batch_loss = []
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(args.device), target.to(args.device)
            optimizer.zero_grad()
            output = net_glob(data)
            loss = F.cross_entropy(output, target)
            loss.backward()
            optimizer.step()
            # Periodic progress logging (format string continues below
            # this fragment's visible cut).
            if batch_idx % 50 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(