Example #1
File: main.py Project: zetaha/globibot
def main():
    args = parse_args()
    bot_config, web_config, db_config = load_config(args.config_path)

    web_app = init_web_app(web_config)

    globibot = Globibot(bot_config, db_config, web_app, args.plugin_path)

    run_async(web_app.run(), globibot.boot())
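run_async is not defined in this snippet; it comes from the globibot project's utilities. A minimal sketch of one plausible implementation, assuming it simply multiplexes the web server and the bot on a single asyncio event loop (the gather-based body is an assumption, not the project's actual code):

import asyncio

def run_async(*coroutines):
    # Assumed behavior: run every coroutine concurrently on one event loop
    # and block until all of them complete.
    async def gather_all():
        await asyncio.gather(*coroutines)
    asyncio.run(gather_all())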
Example #2
def main():
    args = parse_args()
    bot_config, web_config, db_config = load_config(args.config_path)

    web_app = init_web_app(web_config)

    globibot = Globibot(
        bot_config,
        db_config,
        web_app,
        args.plugin_path
    )

    run_async(
        web_app.run(),
        globibot.boot()
    )
Example #3
        clients = [
            Client(copy.deepcopy(extractor), self.trainloader[i], server,
                   self.args, self.cls_num_list[i], epoch)
            for i in range(len(self.trainloader))
        ]
        client_w = None
        # Chain the clients: each one resumes training from the weights the
        # previous client produced.
        for index, client in enumerate(clients):
            print(f"| Global Round: {epoch} | client index: {index} |")
            client.train(client_w)
            client_w = client.get_weight()
        return client_w, server.get_weight()


if __name__ == "__main__":
    args = parse_args()
    data_name = 'cifar10' if not args.cifar100 else 'cifar100'
    num_classes = 10 if not args.cifar100 else 100
    TAG = f'multi-mixsl-mixsum{args.mix_num}-{data_name}-{args.name}'
    print(f'{TAG}: training start....')
    setup_seed(args.seed, args.gpu > -1)
    logs = []
    if args.cifar100:
        train_dataset, test_dataset = get_cifar100(args.balanced)
    else:
        train_dataset, test_dataset = get_cifar10(args.balanced)
    user_groups = random_avg_strategy(train_dataset, args.num_users)
    cls_num_per_clients = count_class_num_per_client(train_dataset,
                                                     user_groups, num_classes)
    logs_file = TAG
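setup_seed and random_avg_strategy are project helpers whose bodies are not shown above. A sketch of plausible implementations, assuming setup_seed pins every RNG the training loop touches and random_avg_strategy deals sample indices out evenly at random (both bodies are assumptions, not the project's code):

import random
import numpy as np
import torch

def setup_seed(seed, use_gpu):
    # Assumed behavior: seed every RNG in play so runs are reproducible.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if use_gpu:
        torch.cuda.manual_seed_all(seed)

def random_avg_strategy(dataset, num_users):
    # Assumed behavior: shuffle all sample indices and split them evenly,
    # giving each user an IID shard of the training set.
    idxs = np.random.permutation(len(dataset))
    return {u: idxs[u::num_users] for u in range(num_users)}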
Example #4
        ])
    z_all = torch.from_numpy(np.array(z_all))

    model.n_query = n_query
    scores = model.set_forward(z_all, is_feature=True)
    pred = scores.data.cpu().numpy().argmax(axis=1)
    # Query labels: each of the n_way classes contributes n_query samples in order.
    y = np.repeat(range(n_way), n_query)
    acc = np.mean(pred == y) * 100
    return acc


# --- main ---
if __name__ == '__main__':

    # parse argument
    params = parse_args('test')
    print('Testing! {} shots on {} dataset with {} epochs of {}({})'.format(
        params.n_shot, params.dataset, params.save_epoch, params.name,
        params.method))
    remove_featurefile = True

    print('\nStage 1: saving features')
    # dataset
    print('  build dataset')
    image_size = 224
    split = params.split
    loadfile = os.path.join(params.data_dir, params.dataset, split + '.json')
    datamgr = SimpleDataManager(image_size, batch_size=64)
    data_loader = datamgr.get_data_loader(loadfile, aug=False)

    print('  build feature encoder')
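The function above returns the accuracy of a single few-shot episode. Evaluation scripts of this kind usually average many episodes and report a 95% confidence interval; a minimal sketch of that aggregation (summarize_episodes is a hypothetical helper, not part of the snippet):

import numpy as np

def summarize_episodes(episode_accs):
    # Mean accuracy and 95% confidence half-width over the evaluated episodes.
    accs = np.asarray(episode_accs)
    ci95 = 1.96 * accs.std() / np.sqrt(len(accs))
    return accs.mean(), ci95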
Example #5
      print('GG!! best accuracy {:f}'.format(max_acc))
    if ((epoch + 1) % params.save_freq == 0) or (epoch == stop_epoch - 1):
      outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch + 1))
      model.save(outfile, epoch)

  return


# --- main function ---
if __name__ == '__main__':

  # set numpy random seed
  np.random.seed(10)

  # parse argument
  params = parse_args('train')
  print('--- LFTNet training: {} ---\n'.format(params.name))
  print(params)

  # outputs and tensorboard dir
  params.tf_dir = './logs/%s' % params.name
  params.checkpoint_dir = '%s/checkpoints/%s' % (params.save_dir, params.name)
  ensurepath(params.tf_dir)
  ensurepath(params.checkpoint_dir)

  # dataloader
  print('\n--- prepare dataloader ---')
  print('  train with multiple seen domains (unseen domain: {})'.format(params.testset))
  datasets = ['miniImagenet', 'cars', 'places', 'cub', 'plantae']
  datasets.remove(params.testset)
  val_file = os.path.join(params.data_dir, 'miniImagenet', 'val.json')
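ensurepath is a small project utility whose body is not shown; a plausible sketch (assumption, not the project's code) that creates the directory tree on demand:

import os

def ensurepath(path):
    # Assumed behavior: create the directory (and any parents) if missing.
    os.makedirs(path, exist_ok=True)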