# Shared imports for all of the handlers in this listing. The project-local
# modules (dynamo_operator, s3_operator, libsvm_dataset, linear_models,
# cluster_models, deep_models, DynamoTable, DynamoCommunicator, S3Storage,
# MLModel, Optimization, Synchronization, Prefix, DiscHyper, and the ADMM and
# training helpers) are assumed to be on the import path.
import json
import math
import os
import time

import boto3
import numpy as np
import torch
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler

Example #1

def handler(event, context):

    function_name = "lambda_core"

    # dataset setting
    dataset_name = 'higgs'
    data_bucket = "higgs-10"
    dataset_type = "dense_libsvm"
    n_features = 30
    tmp_table_name = "tmp-params"
    merged_table_name = "merged-params"
    key_col = "key"

    # hyper-parameters
    n_clusters = 10
    n_epochs = 10
    threshold = 0.0001

    # training setting
    sync_mode = "reduce"  # reduce or reduce_scatter
    n_workers = 10

    # clear dynamodb table
    dynamo_client = dynamo_operator.get_client()
    tmp_tb = DynamoTable(dynamo_client, tmp_table_name)
    merged_tb = DynamoTable(dynamo_client, merged_table_name)
    tmp_tb.clear(key_col)
    merged_tb.clear(key_col)

    # lambda payload
    payload = dict()
    payload['dataset'] = dataset_name
    payload['data_bucket'] = data_bucket
    payload['dataset_type'] = dataset_type
    payload['n_features'] = n_features
    payload['tmp_table_name'] = tmp_table_name
    payload['merged_table_name'] = merged_table_name
    payload['key_col'] = key_col
    payload['n_clusters'] = n_clusters
    payload['n_epochs'] = n_epochs
    payload['threshold'] = threshold
    payload['sync_mode'] = sync_mode
    payload['n_workers'] = n_workers

    # invoke functions
    lambda_client = boto3.client('lambda')
    for i in range(n_workers):
        payload['worker_index'] = i
        payload['file'] = '{}_{}'.format(i, n_workers)
        lambda_client.invoke(FunctionName=function_name,
                             InvocationType='Event',
                             Payload=json.dumps(payload))
def handler(event, context):
    start_time = time.time()

    # dataset setting
    file = event['file']
    data_bucket = event['data_bucket']
    dataset_type = event['dataset_type']
    assert dataset_type == "dense_libsvm"
    n_features = event['n_features']
    n_classes = event['n_classes']
    n_workers = event['n_workers']
    worker_index = event['worker_index']
    tmp_table_name = event['tmp_table_name']
    merged_table_name = event['merged_table_name']
    key_col = event['key_col']

    # training setting
    model_name = event['model']
    optim = event['optim']
    sync_mode = event['sync_mode']
    assert model_name.lower() in MLModel.Linear_Models
    assert optim.lower() == Optimization.ADMM
    assert sync_mode.lower() in [
        Synchronization.Reduce, Synchronization.Reduce_Scatter
    ]

    # hyper-parameter
    learning_rate = event['lr']
    batch_size = event['batch_size']
    n_epochs = event['n_epochs']
    valid_ratio = event['valid_ratio']
    n_admm_epochs = event['n_admm_epochs']
    lam = event['lambda']
    rho = event['rho']

    print('data bucket = {}'.format(data_bucket))
    print("file = {}".format(file))
    print('number of workers = {}'.format(n_workers))
    print('worker index = {}'.format(worker_index))
    print('model = {}'.format(model_name))
    print('optimization = {}'.format(optim))
    print('sync mode = {}'.format(sync_mode))

    s3_storage = S3Storage()
    dynamo_client = dynamo_operator.get_client()
    tmp_table = DynamoTable(dynamo_client, tmp_table_name)
    merged_table = DynamoTable(dynamo_client, merged_table_name)
    communicator = DynamoCommunicator(dynamo_client, tmp_table, merged_table,
                                      key_col, n_workers, worker_index)

    # Read file from s3
    read_start = time.time()
    lines = s3_storage.load(file,
                            data_bucket).read().decode('utf-8').split("\n")
    print("read data cost {} s".format(time.time() - read_start))

    parse_start = time.time()
    dataset = libsvm_dataset.from_lines(lines, n_features, dataset_type)
    print("parse data cost {} s".format(time.time() - parse_start))

    preprocess_start = time.time()
    # Creating data indices for training and validation splits:
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(valid_ratio * dataset_size))
    shuffle_dataset = True
    random_seed = 100
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Creating data samplers and loaders:
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
    n_train_batch = len(train_loader)
    validation_loader = torch.utils.data.DataLoader(dataset,
                                                    batch_size=batch_size,
                                                    sampler=valid_sampler)
    print("preprocess data cost {} s, dataset size = {}".format(
        time.time() - preprocess_start, dataset_size))

    model = linear_models.get_model(model_name, n_features, n_classes)

    z, u = initialize_z_and_u(model.linear.weight.data.size())
    print("size of z = {}".format(z.shape))
    print("size of u = {}".format(u.shape))

    # Loss and Optimizer
    # Softmax is internally computed.
    # Set parameters to be updated.
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    # Training the Model
    train_start = time.time()
    for admm_epoch in range(n_admm_epochs):
        print(">>> ADMM Epoch[{}]".format(admm_epoch))
        admm_epoch_start = time.time()
        admm_epoch_cal_time = 0
        admm_epoch_comm_time = 0
        admm_epoch_test_time = 0
        for epoch in range(n_epochs):
            epoch_start = time.time()
            epoch_loss = 0.
            for batch_index, (items, labels) in enumerate(train_loader):
                batch_start = time.time()
                items = Variable(items.view(-1, n_features))
                labels = Variable(labels)

                # Forward + Backward + Optimize
                optimizer.zero_grad()
                outputs = model(items)
                classify_loss = criterion(outputs, labels)
                epoch_loss += classify_loss.item()
                u_z = torch.from_numpy(u) - torch.from_numpy(z)
                loss = classify_loss
                for name, param in model.named_parameters():
                    if name.split('.')[-1] == "weight":
                        loss += rho / 2.0 * torch.norm(param + u_z, p=2)
                loss.backward(retain_graph=True)
                optimizer.step()

            epoch_cal_time = time.time() - epoch_start
            admm_epoch_cal_time += epoch_cal_time

            # Test the Model
            test_start = time.time()
            n_test_correct = 0
            n_test = 0
            test_loss = 0
            for items, labels in validation_loader:
                items = Variable(items.view(-1, n_features))
                labels = Variable(labels)
                outputs = model(items)
                test_loss += criterion(outputs, labels).item()
                _, predicted = torch.max(outputs.data, 1)
                n_test += labels.size(0)
                n_test_correct += (predicted == labels).sum()
            epoch_test_time = time.time() - test_start
            admm_epoch_test_time += epoch_test_time

            print(
                'Epoch: [%d/%d], Step: [%d/%d], Time: %.4f, Loss: %.4f, epoch cost %.4f, '
                'cal cost %.4f s, test cost %.4f s, accuracy of the model on the %d test samples: %d %%, loss = %f'
                % (epoch + 1, n_epochs, batch_index + 1, n_train_batch,
                   time.time() - train_start, epoch_loss,
                   time.time() - epoch_start, epoch_cal_time, epoch_test_time,
                   n_test, 100. * n_test_correct / n_test, test_loss / n_test))

        sync_start = time.time()
        w = model.linear.weight.data.numpy()
        w_shape = w.shape
        b = model.linear.bias.data.numpy()
        b_shape = b.shape
        u_shape = u.shape

        w_b = np.concatenate((w.flatten(), b.flatten()))
        u_w_b = np.concatenate((u.flatten(), w_b.flatten()))

        # admm does not support async
        if sync_mode == "reduce":
            u_w_b_merge = communicator.reduce_epoch(u_w_b, admm_epoch)
        elif sync_mode == "reduce_scatter":
            u_w_b_merge = communicator.reduce_scatter_epoch(u_w_b, admm_epoch)

        u_len = u_shape[0] * u_shape[1]
        w_len = w_shape[0] * w_shape[1]
        u_mean = u_w_b_merge[:u_len].reshape(u_shape) / float(n_workers)
        w_mean = u_w_b_merge[u_len:u_len + w_len].reshape(w_shape) / float(n_workers)
        b_mean = u_w_b_merge[u_len + w_len:].reshape(b_shape[0]) / float(n_workers)

        model.linear.weight.data = torch.from_numpy(w_mean)
        model.linear.bias.data = torch.from_numpy(b_mean)
        admm_epoch_comm_time += time.time() - sync_start

        if worker_index == 0:
            delete_start = time.time()
            communicator.delete_expired_epoch(admm_epoch)
            admm_epoch_comm_time += time.time() - delete_start

        # z, u, r, s = update_z_u(w, z, u, rho, num_workers, lam)
        # stop = check_stop(ep_abs, ep_rel, r, s, dataset_size, num_features, w, z, u, rho)
        # print("stop = {}".format(stop))

        # z = num_workers * rho / (2 * lam + num_workers * rho) * (w + u_mean)
        z = update_z(w_mean, u_mean, rho, n_workers, lam)
        u = u + model.linear.weight.data.numpy() - z

        print(
            "ADMM Epoch[{}] finishes, cost {} s, cal cost {} s, sync cost {} s, test cost {} s"
            .format(admm_epoch,
                    time.time() - admm_epoch_start, admm_epoch_cal_time,
                    admm_epoch_comm_time, admm_epoch_test_time))

    # Test the Model
    n_test_correct = 0
    n_test = 0
    test_loss = 0
    for items, labels in validation_loader:
        items = Variable(items.view(-1, n_features))
        labels = Variable(labels)
        outputs = model(items)
        test_loss += criterion(outputs, labels).item()
        _, predicted = torch.max(outputs.data, 1)
        n_test += labels.size(0)
        n_test_correct += (predicted == labels).sum()

    print(
        'Train finish, time = %.4f, accuracy of the model on the %d test samples: %d %%, loss = %f'
        % (time.time() - train_start, n_test, 100. * n_test_correct / n_test,
           test_loss / n_test))

    if worker_index == 0:
        tmp_table.clear(key_col)
        merged_table.clear(key_col)

    end_time = time.time()
    print("Elapsed time = {} s".format(end_time - start_time))
def handler(event, context):

    tuner_function_name = "lambda_tuner"
    trial_function_name = "lambda_trial"
    function_start = time.time()
    function_duration = 14 * 60
    n_submit_trial = event.get('n_submit_trial', 0)

    # dataset setting
    dataset_name = 'higgs'
    data_bucket = "higgs-10"
    dataset_type = "dense_libsvm"   # dense_libsvm or sparse_libsvm
    n_features = 30
    n_classes = 2
    tmp_bucket = "tmp-params"
    merged_bucket = "merged-params"

    # training setting
    model = "lr"    # lr, svm, sparse_lr, or sparse_svm
    optim = "grad_avg"  # grad_avg, model_avg, or admm
    sync_mode = "reduce"    # async, reduce or reduce_scatter
    n_workers = 10

    # tuner configs
    tuner_strategy = "random_search"
    tuner_concurrency = 5
    lr_values = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10]
    lr_disc = DiscHyper("lr_discrete", lr_values)

    # hyper-parameters
    lr = 0.01
    batch_size = 100000
    n_epochs = 2
    valid_ratio = .2
    n_admm_epochs = 2
    lam = 0.01
    rho = 0.01

    # clear s3 bucket
    s3_client = s3_operator.get_client()
    s3_operator.clear_bucket(s3_client, tmp_bucket)
    s3_operator.clear_bucket(s3_client, merged_bucket)

    # set dynamodb table
    recorder_table_name = "recoder"
    dynamo_client = dynamo_operator.get_client()
    recorder_tb = DynamoTable(dynamo_client, recorder_table_name)
    items = recorder_tb.list()
    print("{} items in the recorder".format(len(items)))

    # lambda payload
    payload = dict()
    payload['dataset'] = dataset_name
    payload['data_bucket'] = data_bucket
    payload['dataset_type'] = dataset_type
    payload['n_features'] = n_features
    payload['n_classes'] = n_classes
    payload['n_workers'] = n_workers
    payload['tmp_bucket'] = tmp_bucket
    payload['merged_bucket'] = merged_bucket
    payload['model'] = model
    payload['optim'] = optim
    payload['sync_mode'] = sync_mode
    payload['lr'] = lr
    payload['batch_size'] = batch_size
    payload['n_epochs'] = n_epochs
    payload['valid_ratio'] = valid_ratio
    payload['n_admm_epochs'] = n_admm_epochs
    payload['lambda'] = lam
    payload['rho'] = rho

    # invoke functions
    lambda_client = boto3.client('lambda')

    n_trial = 10
    trial_counter = n_submit_trial

    for i in range(n_trial):
        n_recorder_items = len(recorder_tb.list())
        n_running_trial = trial_counter - n_recorder_items
        while n_running_trial >= tuner_concurrency:
            time.sleep(1)
            n_recorder_items = len(recorder_tb.list())
            n_running_trial = trial_counter - n_recorder_items
        for j in range(n_workers):
            payload = dict()
            payload['dataset'] = dataset_name
            payload['data_bucket'] = data_bucket
            payload['dataset_type'] = dataset_type
            payload['n_features'] = n_features
            payload['n_classes'] = n_classes
            payload['n_workers'] = n_workers
            payload['tmp_bucket'] = tmp_bucket
            payload['merged_bucket'] = merged_bucket
            payload['model'] = model
            payload['optim'] = optim
            payload['sync_mode'] = sync_mode
            payload['batch_size'] = batch_size
            payload['n_epochs'] = n_epochs
            payload['valid_ratio'] = valid_ratio
            payload['n_admm_epochs'] = n_admm_epochs
            payload['lambda'] = lam
            payload['rho'] = rho
            payload['function_name'] = trial_function_name

            payload['tmp_bucket'] = tmp_bucket + "-i"
            payload['merged_bucket'] = merged_bucket + "-i"
            payload['lr'] = lr_disc.next() if tuner_strategy == "grid_search" else lr_disc.sample()
            payload['worker_index'] = j
            payload['train_file'] = 'training_{}.pt'.format(j)
            payload['test_file'] = 'test.pt'
            lambda_client.invoke(FunctionName=trial_function_name,
                                 InvocationType='Event',
                                 Payload=json.dumps(payload))
        trial_counter += 1
        if time.time() - function_start > function_duration:
            # re-invoke itself to continue the remaining trials
            print("Invoking the next round of tuner functions, total trials {}, submitted trials {}"
                  .format(n_trial, trial_counter))
            lambda_client = boto3.client('lambda')
            payload = {
                'n_submit_trial': trial_counter
            }
            lambda_client.invoke(FunctionName=tuner_function_name,
                                 InvocationType='Event',
                                 Payload=json.dumps(payload))
            return
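DiscHyper is the tuner's discrete hyper-parameter helper; the handler above only exercises its constructor, next(), and sample(). A minimal sketch consistent with that usage (grid search walks the candidate values in order, random search draws uniformly) might be:

import random


class DiscHyper:
    """Sketch of a discrete hyper-parameter: sequential or random draws."""

    def __init__(self, name, values):
        self.name = name
        self.values = values
        self.index = 0

    def next(self):
        # Grid search: return the candidates in order, wrapping around.
        value = self.values[self.index % len(self.values)]
        self.index += 1
        return value

    def sample(self):
        # Random search: draw uniformly from the candidates.
        return random.choice(self.values)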
Example #4
def handler(event, context):
    # dataset
    data_bucket = event['data_bucket']
    file = event['file']
    dataset_type = event["dataset_type"]
    assert dataset_type == "dense_libsvm"
    n_features = event['n_features']
    n_workers = event["n_workers"]
    worker_index = event['worker_index']
    tmp_table_name = event['tmp_table_name']
    merged_table_name = event['merged_table_name']
    key_col = event['key_col']

    # hyper-parameter
    n_clusters = event['n_clusters']
    n_epochs = event["n_epochs"]
    threshold = event["threshold"]
    sync_mode = event["sync_mode"]
    assert sync_mode.lower() in [
        Synchronization.Reduce, Synchronization.Reduce_Scatter
    ]

    print('data bucket = {}'.format(data_bucket))
    print("file = {}".format(file))
    print('number of workers = {}'.format(n_workers))
    print('worker index = {}'.format(worker_index))
    print('num clusters = {}'.format(n_clusters))
    print('sync mode = {}'.format(sync_mode))

    s3_storage = S3Storage()
    dynamo_client = dynamo_operator.get_client()
    tmp_table = DynamoTable(dynamo_client, tmp_table_name)
    merged_table = DynamoTable(dynamo_client, merged_table_name)
    communicator = DynamoCommunicator(dynamo_client, tmp_table, merged_table,
                                      key_col, n_workers, worker_index)

    # Reading data from S3
    read_start = time.time()
    lines = s3_storage.load(file,
                            data_bucket).read().decode('utf-8').split("\n")
    print("read data cost {} s".format(time.time() - read_start))

    parse_start = time.time()
    dataset = libsvm_dataset.from_lines(lines, n_features, dataset_type).ins_np
    data_type = dataset.dtype
    centroid_shape = (n_clusters, dataset.shape[1])
    print("parse data cost {} s".format(time.time() - parse_start))
    print("dataset type: {}, dtype: {}, Centroids shape: {}, num_features: {}".
          format(dataset_type, data_type, centroid_shape, n_features))

    init_centroids_start = time.time()
    if worker_index == 0:
        centroids = dataset[0:n_clusters]
        merged_table.save(centroids.tobytes(), Prefix.KMeans_Init_Cent + "-1",
                          key_col)
    else:
        centroid_bytes = (merged_table.load_or_wait(
            Prefix.KMeans_Init_Cent + "-1", key_col, 0.1))['value'].value
        centroids = centroid_bytes2np(centroid_bytes, n_clusters, data_type)
        if centroid_shape != centroids.shape:
            raise Exception("The shape of centroids does not match.")

    print("initialize centroids takes {} s".format(time.time() -
                                                   init_centroids_start))

    model = cluster_models.get_model(dataset, centroids, dataset_type,
                                     n_features, n_clusters)

    train_start = time.time()
    for epoch in range(n_epochs):
        epoch_start = time.time()

        # rearrange data points
        model.find_nearest_cluster()

        local_cent = model.get_centroids("numpy").reshape(-1)
        local_cent_error = np.concatenate(
            (local_cent.flatten(), np.array([model.error], dtype=np.float32)))
        epoch_cal_time = time.time() - epoch_start

        # sync local centroids and error
        epoch_comm_start = time.time()

        if sync_mode == "reduce":
            cent_error_merge = communicator.reduce_epoch(
                local_cent_error, epoch)
        elif sync_mode == "reduce_scatter":
            cent_error_merge = communicator.reduce_scatter_epoch(
                local_cent_error, epoch)

        cent_merge = cent_error_merge[:-1].reshape(centroid_shape) / float(
            n_workers)
        error_merge = cent_error_merge[-1] / float(n_workers)

        model.centroids = cent_merge
        model.error = error_merge
        epoch_comm_time = time.time() - epoch_comm_start

        print("one {} round cost {} s".format(sync_mode, epoch_comm_time))

        print(
            "Epoch[{}] Worker[{}], error = {}, cost {} s, cal cost {} s, sync cost {} s"
            .format(epoch, worker_index, model.error,
                    time.time() - epoch_start, epoch_cal_time,
                    epoch_comm_time))

        if model.error < threshold:
            break

    if worker_index == 0:
        tmp_table.clear(key_col)
        merged_table.clear(key_col)

    print("Worker[{}] finishes training: Error = {}, cost {} s".format(
        worker_index, model.error,
        time.time() - train_start))
    return
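centroid_bytes2np, used when workers other than 0 pick up the initial centroids, is another helper not shown in this listing. Since worker 0 serializes the centroids with numpy's tobytes(), a plausible inverse is this sketch:

import numpy as np


def centroid_bytes2np(centroid_bytes, n_clusters, data_type):
    # Invert ndarray.tobytes(): restore the flat buffer, then the 2-D shape.
    flat = np.frombuffer(centroid_bytes, dtype=data_type)
    return flat.reshape(n_clusters, -1)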
Example #5
def handler(event, context):

    function_name = "lambda_core"

    # dataset setting
    dataset_name = 'cifar10'
    data_bucket = "cifar10dataset"
    n_features = 32 * 32
    n_classes = 10
    tmp_table_name = "tmp-params"
    merged_table_name = "merged-params"
    cp_bucket = "cp-model"
    key_col = "key"

    # training setting
    model = "mobilenet"     # mobilenet or resnet
    optim = "grad_avg"  # grad_avg or model_avg
    sync_mode = "reduce"    # async, reduce or reduce_scatter
    n_workers = 10

    # hyper-parameters
    lr = 0.01
    batch_size = 256
    n_epochs = 5
    start_epoch = 0
    run_epochs = 3

    # clear the checkpoint bucket and the dynamodb tables
    s3_storage = S3Storage()
    s3_storage.clear(cp_bucket)
    dynamo_client = dynamo_operator.get_client()
    tmp_tb = DynamoTable(dynamo_client, tmp_table_name)
    merged_tb = DynamoTable(dynamo_client, merged_table_name)
    tmp_tb.clear(key_col)
    merged_tb.clear(key_col)

    # lambda payload
    payload = dict()
    payload['dataset'] = dataset_name
    payload['data_bucket'] = data_bucket
    payload['n_features'] = n_features
    payload['n_classes'] = n_classes
    payload['n_workers'] = n_workers
    payload['tmp_table_name'] = tmp_table_name
    payload['merged_table_name'] = merged_table_name
    payload['key_col'] = key_col
    payload['cp_bucket'] = cp_bucket
    payload['model'] = model
    payload['optim'] = optim
    payload['sync_mode'] = sync_mode
    payload['lr'] = lr
    payload['batch_size'] = batch_size
    payload['n_epochs'] = n_epochs
    payload['start_epoch'] = start_epoch
    payload['run_epochs'] = run_epochs
    payload['function_name'] = function_name

    # invoke functions
    lambda_client = boto3.client('lambda')
    for i in range(n_workers):
        payload['worker_index'] = i
        payload['train_file'] = 'training_{}.pt'.format(i)
        payload['test_file'] = 'test.pt'
        lambda_client.invoke(FunctionName=function_name,
                             InvocationType='Event',
                             Payload=json.dumps(payload))
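Every worker also goes through an S3Storage helper with load, download, upload_file, and clear methods. Its real definition is outside this listing; a thin boto3 wrapper matching the call sites might look like the sketch below (method names come from the call sites, the bodies are assumptions):

import boto3


class S3Storage:
    """Sketch of the S3 helper assumed by these handlers."""

    def __init__(self):
        self.client = boto3.client('s3')

    def load(self, key, bucket):
        # Return a streaming body; callers do .read().decode('utf-8').
        return self.client.get_object(Bucket=bucket, Key=key)['Body']

    def download(self, bucket, key, local_path):
        self.client.download_file(bucket, key, local_path)

    def upload_file(self, bucket, key, local_path):
        self.client.upload_file(local_path, bucket, key)

    def clear(self, bucket):
        # Delete every object in the bucket.
        paginator = self.client.get_paginator('list_objects_v2')
        for page in paginator.paginate(Bucket=bucket):
            for obj in page.get('Contents', []):
                self.client.delete_object(Bucket=bucket, Key=obj['Key'])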
Example #6
def handler(event, context):
    start_time = time.time()

    # dataset setting
    train_file = event['train_file']
    test_file = event['test_file']
    data_bucket = event['data_bucket']
    n_features = event['n_features']
    n_classes = event['n_classes']
    n_workers = event['n_workers']
    worker_index = event['worker_index']
    tmp_table_name = event['tmp_table_name']
    merged_table_name = event['merged_table_name']
    key_col = event['key_col']
    cp_bucket = event['cp_bucket']

    # training setting
    model_name = event['model']
    optim = event['optim']
    sync_mode = event['sync_mode']
    assert model_name.lower() in MLModel.Deep_Models
    assert optim.lower() in [Optimization.Grad_Avg, Optimization.Model_Avg]
    assert sync_mode.lower() in Synchronization.All

    # hyper-parameter
    learning_rate = event['lr']
    batch_size = event['batch_size']
    n_epochs = event['n_epochs']
    start_epoch = event['start_epoch']
    run_epochs = event['run_epochs']

    function_name = event['function_name']

    print('data bucket = {}'.format(data_bucket))
    print("train file = {}".format(train_file))
    print("test file = {}".format(test_file))
    print('number of workers = {}'.format(n_workers))
    print('worker index = {}'.format(worker_index))
    print('model = {}'.format(model_name))
    print('optimization = {}'.format(optim))
    print('sync mode = {}'.format(sync_mode))
    print('start epoch = {}'.format(start_epoch))
    print('run epochs = {}'.format(run_epochs))

    print("Run function {}, round: {}/{}, epoch: {}/{} to {}/{}".format(
        function_name,
        int(start_epoch / run_epochs) + 1, math.ceil(n_epochs / run_epochs),
        start_epoch + 1, n_epochs, start_epoch + run_epochs, n_epochs))

    s3_storage = S3Storage()
    dynamo_client = dynamo_operator.get_client()
    tmp_table = DynamoTable(dynamo_client, tmp_table_name)
    merged_table = DynamoTable(dynamo_client, merged_table_name)
    communicator = DynamoCommunicator(dynamo_client, tmp_table, merged_table,
                                      key_col, n_workers, worker_index)

    # download file from s3
    local_dir = "/tmp/"
    read_start = time.time()
    s3_storage.download(data_bucket, train_file,
                        os.path.join(local_dir, train_file))
    s3_storage.download(data_bucket, test_file,
                        os.path.join(local_dir, test_file))
    print("download file from s3 cost {} s".format(time.time() - read_start))

    train_set = torch.load(os.path.join(local_dir, train_file))
    test_set = torch.load(os.path.join(local_dir, test_file))
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=100,
                                              shuffle=False)
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    print("read data cost {} s".format(time.time() - read_start))

    random_seed = 100
    torch.manual_seed(random_seed)

    device = 'cpu'
    net = deep_models.get_models(model_name).to(device)

    # Loss and Optimizer
    # Softmax is internally computed.
    # Set parameters to be updated.
    optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate)

    # load checkpoint model if it is not the first round
    if start_epoch != 0:
        checked_file = 'checkpoint_{}.pt'.format(start_epoch - 1)
        s3_storage.download(cp_bucket, checked_file,
                            os.path.join(local_dir, checked_file))
        checkpoint_model = torch.load(os.path.join(local_dir, checked_file))

        net.load_state_dict(checkpoint_model['model_state_dict'])
        optimizer.load_state_dict(checkpoint_model['optimizer_state_dict'])
        print("load checkpoint model at epoch {}".format(start_epoch - 1))

    for epoch in range(start_epoch, min(start_epoch + run_epochs, n_epochs)):

        train_loss, train_acc = train_one_epoch(epoch, net, train_loader,
                                                optimizer, worker_index,
                                                communicator, optim, sync_mode)
        test_loss, test_acc = test(epoch, net, test_loader)

        print(
            'Epoch: {}/{},'.format(epoch + 1, n_epochs),
            'train loss: {}'.format(train_loss),
            'train acc: {},'.format(train_acc),
            'test loss: {}'.format(test_loss),
            'test acc: {}.'.format(test_acc),
        )

    if worker_index == 0:
        tmp_table.clear(key_col)
        merged_table.clear(key_col)

    # training is not finished yet, invoke next round
    if epoch < n_epochs - 1:
        checkpoint_model = {
            'epoch': epoch,
            'model_state_dict': net.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'loss': train_loss.average
        }

        checked_file = 'checkpoint_{}.pt'.format(epoch)

        if worker_index == 0:
            torch.save(checkpoint_model, os.path.join(local_dir, checked_file))
            s3_storage.upload_file(cp_bucket, checked_file,
                                   os.path.join(local_dir, checked_file))
            print("checkpoint model at epoch {} saved!".format(epoch))

        print(
            "Invoking the next round of functions. round: {}/{}, start epoch: {}, run epoch: {}"
            .format(
                int((epoch + 1) / run_epochs) + 1,
                math.ceil(n_epochs / run_epochs), epoch + 1, run_epochs))
        lambda_client = boto3.client('lambda')
        payload = {
            'train_file': event['train_file'],
            'test_file': event['test_file'],
            'data_bucket': event['data_bucket'],
            'n_features': event['n_features'],
            'n_classes': event['n_classes'],
            'n_workers': event['n_workers'],
            'worker_index': event['worker_index'],
            'tmp_table_name': event['tmp_table_name'],
            'merged_table_name': event['merged_table_name'],
            'key_col': event['key_col'],
            'cp_bucket': event['cp_bucket'],
            'model': event['model'],
            'optim': event['optim'],
            'sync_mode': event['sync_mode'],
            'lr': event['lr'],
            'batch_size': event['batch_size'],
            'n_epochs': event['n_epochs'],
            'start_epoch': epoch + 1,
            'run_epochs': event['run_epochs'],
            'function_name': event['function_name']
        }
        lambda_client.invoke(FunctionName=function_name,
                             InvocationType='Event',
                             Payload=json.dumps(payload))

    end_time = time.time()
    print("Elapsed time = {} s".format(end_time - start_time))
def handler(event, context):

    function_name = "lambda_core"

    # dataset setting
    dataset_name = 'higgs'
    data_bucket = "higgs-10"
    dataset_type = "dense_libsvm"  # dense_libsvm
    n_features = 30
    n_classes = 2
    tmp_table_name = "tmp-params"
    merged_table_name = "merged-params"
    key_col = "key"

    # training setting
    model = "lr"  # lr, svm
    optim = "grad_avg"  # grad_avg, model_avg, or admm
    sync_mode = "reduce"  # async, reduce or reduce_scatter
    n_workers = 10

    # hyper-parameters
    lr = 0.01
    batch_size = 100000
    n_epochs = 2
    valid_ratio = .2
    n_admm_epochs = 2
    lam = 0.01
    rho = 0.01

    # clear dynamodb table
    dynamo_client = dynamo_operator.get_client()
    tmp_tb = DynamoTable(dynamo_client, tmp_table_name)
    merged_tb = DynamoTable(dynamo_client, merged_table_name)
    tmp_tb.clear(key_col)
    merged_tb.clear(key_col)

    # lambda payload
    payload = dict()
    payload['dataset'] = dataset_name
    payload['data_bucket'] = data_bucket
    payload['dataset_type'] = dataset_type
    payload['n_features'] = n_features
    payload['n_classes'] = n_classes
    payload['n_workers'] = n_workers
    payload['tmp_table_name'] = tmp_table_name
    payload['merged_table_name'] = merged_table_name
    payload['key_col'] = key_col
    payload['model'] = model
    payload['optim'] = optim
    payload['sync_mode'] = sync_mode
    payload['lr'] = lr
    payload['batch_size'] = batch_size
    payload['n_epochs'] = n_epochs
    payload['valid_ratio'] = valid_ratio
    payload['n_admm_epochs'] = n_admm_epochs
    payload['lambda'] = lam
    payload['rho'] = rho

    # invoke functions
    lambda_client = boto3.client('lambda')
    for i in range(n_workers):
        payload['worker_index'] = i
        payload['file'] = '{}_{}'.format(i, n_workers)
        lambda_client.invoke(FunctionName=function_name,
                             InvocationType='Event',
                             Payload=json.dumps(payload))
Example #8
def handler(event, context):

    tuner_function_name = "lambda_tuner"
    trial_function_name = "lambda_trial"
    function_start = time.time()
    function_duration = 14 * 60
    n_submit_trial = event.get('n_submit_trial', 0)

    # dataset setting
    dataset_name = 'cifar10'
    data_bucket = "cifar10dataset"
    n_features = 32 * 32
    n_classes = 10
    host = "127.0.0.1"
    port = 11211
    tmp_bucket = "tmp-params"
    merged_bucket = "merged-params"
    cp_bucket = "cp-model"

    # training setting
    model = "mobilenet"  # mobilenet or resnet
    optim = "grad_avg"  # grad_avg or model_avg
    sync_mode = "reduce"  # async, reduce or reduce_scatter
    n_workers = 10

    # tuner configs
    tuner_strategy = "random_search"
    tuner_concurrency = 5
    lr_values = [0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.10]
    lr_disc = DiscHyper("lr_discrete", lr_values)

    # hyper-parameters
    batch_size = 256
    n_epochs = 5
    start_epoch = 0
    run_epochs = 3

    # set dynamodb table
    recorder_table_name = "recoder"
    dynamo_client = dynamo_operator.get_client()
    recorder_tb = DynamoTable(dynamo_client, recorder_table_name)
    items = recorder_tb.list()
    print("{} items in the recorder".format(len(items)))

    # invoke functions
    lambda_client = boto3.client('lambda')

    n_trial = 10
    trial_counter = n_submit_trial

    for i in range(n_trial):
        n_recorder_items = len(recorder_tb.list())
        n_running_trial = trial_counter - n_recorder_items
        while n_running_trial >= tuner_concurrency:
            time.sleep(1)
            n_recorder_items = len(recorder_tb.list())
            n_running_trial = trial_counter - n_recorder_items
        for j in range(n_workers):
            # lambda payload
            payload = dict()
            payload['dataset'] = dataset_name
            payload['data_bucket'] = data_bucket
            payload['n_features'] = n_features
            payload['n_classes'] = n_classes
            payload['n_workers'] = n_workers
            payload['host'] = host
            payload['port'] = port
            payload['model'] = model
            payload['optim'] = optim
            payload['sync_mode'] = sync_mode
            payload['batch_size'] = batch_size
            payload['n_epochs'] = n_epochs
            payload['start_epoch'] = start_epoch
            payload['run_epochs'] = run_epochs
            payload['function_name'] = trial_function_name

            payload['tmp_bucket'] = tmp_bucket + "-i"
            payload['merged_bucket'] = merged_bucket + "-i"
            payload['cp_bucket'] = cp_bucket + "-i"
            payload['lr'] = lr_disc.next() if tuner_strategy == "grid_search" else lr_disc.sample()
            payload['worker_index'] = j
            payload['train_file'] = 'training_{}.pt'.format(j)
            payload['test_file'] = 'test.pt'
            lambda_client.invoke(FunctionName=trial_function_name,
                                 InvocationType='Event',
                                 Payload=json.dumps(payload))
        trial_counter += 1
        if time.time() - function_start > function_duration:
            # re-invoke itself to continue the remaining trials
            print(
                "Invoking the next round of tuner functions, total trials {}, submitted trials {}"
                .format(n_trial, trial_counter))
            lambda_client = boto3.client('lambda')
            payload = {'n_submit_trial': trial_counter}
            lambda_client.invoke(FunctionName=tuner_function_name,
                                 InvocationType='Event',
                                 Payload=json.dumps(payload))
            return