Code Example #1
File: views.py  Project: KidsXH/FLHeteroBackend
def client(request):
    if request.method == 'POST':
        request_data = json.loads(request.body)
        client_name = request_data['clientName']

        if client_name not in rs['client_names']:
            return HttpResponseBadRequest()

        rs.set('client', client_name)
        rs.set('annotations', [])

        # tuple of index arrays for the matching client; its row is selected below
        client_idx = np.where(rs['client_names'] == client_name)

        history = load_history(rs['dataset'])
        loss = history['loss'][client_idx][0]
        val_acc = history['val_acc'][client_idx][0]
        tot_acc = history['tot_acc'][client_idx][0]
        train_size, test_size = get_load_data_size(rs['dataset'], client_name)

        data = {
            'loss': loss.tolist(),
            'valAcc': val_acc.tolist(),
            'totAcc': tot_acc.tolist(),
            'trainSize': int(train_size),
            'testSize': int(test_size),
        }
        return JsonResponse(data)
Code Example #2
def check_update() -> List[News]:
    history = load_history()

    using_crawlers = filter(lambda c: c.SITE_NAME in TOKEN_TABLE.keys(),
                            get_all_crawler_classes())
    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        params = [(clazz, history) for clazz in using_crawlers]
        result = executor.map(crawl_news_with_class, params)

    save_history(history)
    # flatten each crawler's list of News into a single list
    return sum(list(result), [])
Code Example #3
File: views.py  Project: KidsXH/FLHeteroBackend
def datasets(request):
    if request.method == 'GET':
        data = {'datasetNames': list(DATA_HOME.keys())}
        return JsonResponse(data)
    if request.method == 'POST':
        request_data = json.loads(request.body)
        dataset_name = request_data['datasetName']

        history = load_history(dataset_name)
        samples_data = np.load(os.path.join(DATA_HOME[dataset_name],
                                            'samples.npz'),
                               allow_pickle=True)
        client_names = samples_data['client_names']
        n_clients = history['n_clients']
        n_rounds = history['loss'].shape[1]

        rs.set('dataset', dataset_name)
        rs.set('client_names', client_names)
        rs.set('n_clients', n_clients)
        rs.set('n_rounds', n_rounds)
        rs.set('data_shape', samples_data['shape'])
        rs.set('data_type', samples_data['type'])

        data = {
            'type': str(samples_data['type']),
            'dimensions': samples_data['shape'].tolist(),
            'samplingTypes': samples_data['sampling_types'].tolist(),
            'numberOfClients': n_clients,
            'clientNames': client_names.tolist(),
            'communicationRounds': n_rounds,
        }

        # for (k, v) in data.items():
        #     print('{}: {} {}'.format(k, type(v), np.shape(v)))

        return JsonResponse(data)
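
The load_history(dataset_name) helper itself is not shown on this page. Below is a minimal sketch of what it could look like for examples #1 and #3, assuming the per-dataset training history is stored as a history.npz archive under DATA_HOME; the file name and layout are assumptions inferred only from how the returned object is indexed above.

import os
import numpy as np

# DATA_HOME maps dataset names to data directories; this mapping is a placeholder.
DATA_HOME = {'mnist': 'data/mnist'}

def load_history(dataset_name):
    # Assumed layout: 'loss', 'val_acc' and 'tot_acc' are arrays of shape
    # (n_clients, n_rounds), and 'n_clients' is a scalar entry.
    return np.load(os.path.join(DATA_HOME[dataset_name], 'history.npz'),
                   allow_pickle=True)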
Code Example #4
test_loader = DataLoader(
    dataset=test_dataset,
    batch_size=batch_size,
)

# ResNet50 with pretraining
model = resnet50(pretrained=True)
model.to(device)

print('Start training ResNet50 with pretraining...')
history = train_nn(
    'resnet50_with_pretraining', model, epochs_50,
    optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4),
    loss_func, train_loader, test_loader, device)

resnet50_history = load_history(50)
resnet50_history[0] = history
save_history(50, resnet50_history)

# ResNet18 with pretraining
model = resnet18(pretrained=True)
model.to(device)

print('Start training ResNet18 with pretraining...')
history = train_nn(
    'resnet18_with_pretraining', model, epochs_18,
    optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-4),
    loss_func, train_loader, test_loader, device)

resnet18_history = load_history(18)
resnet18_history[0] = history
save_history(18, resnet18_history)
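
Example #4 calls load_history and save_history with the ResNet depth as a key. A minimal sketch consistent with that usage, assuming per-model histories are kept in pickle files named by depth (the folder and file naming below are assumptions):

import os
import pickle

def load_history(depth, folder="histories"):
    # Return the list of run histories for the given ResNet depth;
    # a single empty slot if nothing has been saved yet.
    path = os.path.join(folder, "resnet{}_history.pkl".format(depth))
    if not os.path.exists(path):
        return [None]
    with open(path, "rb") as f:
        return pickle.load(f)

def save_history(depth, history, folder="histories"):
    os.makedirs(folder, exist_ok=True)
    path = os.path.join(folder, "resnet{}_history.pkl".format(depth))
    with open(path, "wb") as f:
        pickle.dump(history, f)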
Code Example #5
def train(input_placeholder, output_data, sess):
    # build cost function
    action = tf.placeholder("float", [None, ACTIONS_CHOICE_NUMBER])
    y = tf.placeholder("float", [None])
    y_action = tf.reduce_sum(tf.multiply(output_data, action),
                             reduction_indices=1)
    cost = tf.reduce_mean(tf.square(y - y_action))
    train_step = tf.train.AdamOptimizer(1e-6).minimize(cost)

    # start game
    game_state = game.GameState()

    global_timestamp = 0
    epsilon = EPSILON

    memory = Memory(MEMORY_SIZE, FRAME_NUM_PER_STACK)

    # start network
    sess.run(tf.global_variables_initializer())
    # network checkpoint saver and restore loader
    saver = tf.train.Saver()
    checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_FOLDER)
    if checkpoint and checkpoint.model_checkpoint_path and os.path.exists(
            os.path.join(CHECKPOINT_FOLDER, "global_state.pkl")):
        data = load_history(os.path.join(CHECKPOINT_FOLDER,
                                         "global_state.pkl"))
        global_timestamp = data['global_timestamp']
        epsilon = data['epsilon']
        memory = data['memory']
        game_state = data['game_state']
        saver.restore(sess, checkpoint.model_checkpoint_path)
        logging.info("restored from checkpoint",
                     extra={
                         'stage': get_stage_name(global_timestamp),
                         'timestamp': global_timestamp,
                         'epsilon': epsilon,
                         'reward': "",
                         'action': ""
                     })
    else:
        image_data, _, _ = game_state.frame_step(actions.NOTHING)
        memory.initial_stack(image_data)

    prev_state = memory.get_current_stack()

    while True:
        actions_scores = output_data.eval(
            feed_dict={input_placeholder: [prev_state]})[0]

        action_name, action_choice = actions.get_next_action(
            epsilon, actions_scores)

        image_data, reward, game_terminate = game_state.frame_step(
            action_choice)
        memory.stack_frame(image_data)

        new_state = memory.get_current_stack()
        memory.remember(prev_state, action_choice, reward, new_state,
                        game_terminate)

        # anneal
        if global_timestamp > OBSERVE_DURATION and epsilon > MIN_EPSILON:
            logging.info("start anneal",
                         extra={
                             'stage': get_stage_name(global_timestamp),
                             'timestamp': global_timestamp,
                             'epsilon': epsilon,
                             'reward': reward,
                             'action': action_name
                         })
            epsilon -= float(EPSILON - MIN_EPSILON) / ANNEAL_DURATION

        # explore + train
        if global_timestamp > OBSERVE_DURATION:
            prev_state_batch, action_batch, reward_batch, new_state_batch, game_terminate_batch = memory.get_sample_batches(
                BATCH_SIZE)

            y_batch = []
            evaluate = output_data.eval(
                feed_dict={input_placeholder: new_state_batch})
            for i, game_terminate in enumerate(game_terminate_batch):
                # train target to reward
                if game_terminate:
                    y_batch.append(reward_batch[i])
                else:
                    y_batch.append(reward_batch[i] +
                                   GAMMA * np.max(evaluate[i]))

            # gradient step: Q(s, a) must be evaluated on the states where the
            # actions were taken, so feed the previous states here
            train_step.run(
                feed_dict={
                    y: y_batch,
                    action: action_batch,
                    input_placeholder: prev_state_batch
                })

        if global_timestamp % CHECKPOINT_GAP == 0:
            saver.save(sess,
                       os.path.join(CHECKPOINT_FOLDER, 'flappy-bird'),
                       global_step=global_timestamp)
            save_history(
                os.path.join(CHECKPOINT_FOLDER, 'global_state.pkl'), {
                    'global_timestamp': global_timestamp,
                    'epsilon': epsilon,
                    'memory': memory,
                    'game_state': game_state
                })
            logging.info("checkpoint saved",
                         extra={
                             'stage': get_stage_name(global_timestamp),
                             'timestamp': global_timestamp,
                             'epsilon': epsilon,
                             'reward': reward,
                             'action': ""
                         })

        # update state
        prev_state = new_state
        logging.info("finish epoch",
                     extra={
                         'stage': get_stage_name(global_timestamp),
                         'timestamp': global_timestamp,
                         'epsilon': epsilon,
                         'reward': reward,
                         'action': action_name
                     })
        global_timestamp += 1
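
Example #5 expects load_history(path) and save_history(path, data) to round-trip an arbitrary Python dictionary to disk. A minimal sketch under that assumption, using pickle; the project's actual serialization is not shown on this page.

import pickle

def load_history(path):
    # Restore the saved training state (timestamp, epsilon, replay memory, game state).
    with open(path, "rb") as f:
        return pickle.load(f)

def save_history(path, data):
    # Persist the training state next to the TensorFlow checkpoint files.
    with open(path, "wb") as f:
        pickle.dump(data, f)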
Code Example #6
File: main.py  Project: alzayats/MASAGA
    parser.add_argument('-mList', '--mList', default=None, type=str, nargs="+")
    parser.add_argument('-eList', '--eList', default=None, type=int, nargs="+")
    parser.add_argument('-lList', '--lList', default=None, nargs="+")
    parser.add_argument('-sList', '--sList', default=None, type=str, nargs="+")

    args = parser.parse_args()

    dList, mList, eList, lList, sList = experiments.get_experiment(
        args.exp, args)
    mList.sort()
    sList.sort()

    create_plot = False
    results = {}
    for d, m, e, l, s in product(dList, mList, eList, lList, sList):
        history = ut.load_history(d, m, l, e, s, args.reset)

        if args.mode == "train":
            if len(history["loss"]) == 0 or args.reset:
                train.train(dataset_name=d,
                            model_name=m,
                            epochs=e,
                            learning_rate=l,
                            sampling_method=s,
                            reset=args.reset)

        if args.mode == "summary":
            if len(history["loss"]) == 0:
                continue
            results[history["exp_name"]] = history["loss"][-1]
Code Example #7
File: train.py  Project: alzayats/MASAGA
def train(dataset_name, model_name, learning_rate, epochs, 
          sampling_method, reset, save_img=False):
    # epochs=2
    history = ut.load_history(dataset_name, model_name, 
                              learning_rate, epochs, 
                              sampling_method, reset)
    print("Running {}".format(history["exp_name"]))

    # SET SEED
    np.random.seed(1)
    torch.manual_seed(1) 
    torch.cuda.manual_seed_all(1)

    # Get Datasets
    Z, shape = da.DATASETS[dataset_name]()
    Z = Z - Z.mean(0)
    n = Z.shape[0]

    nList = np.arange(n)
    

    if sampling_method == "uniform":
        # uniform sampling: every index gets the same probability
        P = np.ones(n) / n
    elif sampling_method == "lipschitz":
        L = loss_eigenvector.Lipschitz(Z)
        P = L / L.sum()
    
    L = loss_eigenvector.Lipschitz(Z).max()    
    if isinstance(learning_rate, str):
        lr = get_learning_rate(L, learning_rate)
    else:
        lr = learning_rate

    # MODEL
    model = models.MODELS[model_name](Z=Z, 
                        F_func=loss_eigenvector.Loss, 
                        G_func=loss_eigenvector.Gradient, 
                        lr=lr)
    # Solve    
    x_sol = loss_eigenvector.leading_eigenvecor(Z)
    loss_min = loss_eigenvector.Loss(x_sol, Z)
    # sanity_checks.test_lossmin(model.x, Z, loss_min)    
    # sanity_checks.test_gradient(torch.FloatTensor(x_sol)[:,None], Z)    
    # sanity_checks.test_batch_loss_grad(model.x, Z)
    e = 0.
    n_iters = 0.

    # Train
    with torch.no_grad():
        while e < (epochs + 1):
            next_epoch = True

            # inner loop
            for ii in range(n):
                e = n_iters / float(n)
                # Verbose
                if ii % 500 == 0:
                    L = (float(model.F_func(model.x, Z)) - float(loss_min)) / np.abs(float(loss_min))

                    history["loss"] += [{"loss": L, "epoch": e}]
                    print("Epoch: %.2f/%d - %s - lr %f - loss: %.3f" % 
                         (e, epochs, history["exp_name"], lr,
                          (L*n)))

                # select a random sample
                i = np.random.choice(nList, replace=True, p=P)

                # make a step
                n_evals = model.step(Z[i], index=i, next_epoch=next_epoch)
                next_epoch = False

                n_iters += n_evals
        
    # After training is done
    ut.save_json(history["path_save"], history)
    torch.save(model.state_dict(), history['path_model'])
    print("model saved in {}".format( history['path_model']))
Code Example #8
def history():
    from utils import load_history
    cached_history = load_history()
    print(f"There are {len(cached_history)} sites.")
    for site, cache in cached_history.items():
        print(f"{site} has {len(cache)} history.")