def handler(event, context):
    start_time = time.time()
    bucket = event['bucket_name']
    worker_index = event['rank']
    num_workers = event['num_workers']
    key = event['file']
    tmp_bucket = event['tmp_bucket']
    merged_bucket = event['merged_bucket']
    num_classes = event['num_classes']
    num_features = event['num_features']
    num_epochs = event['num_epochs']
    learning_rate = event['learning_rate']
    batch_size = event['batch_size']

    print('bucket = {}'.format(bucket))
    print("file = {}".format(key))
    print('tmp bucket = {}'.format(tmp_bucket))
    print('merged bucket = {}'.format(merged_bucket))
    print('number of workers = {}'.format(num_workers))
    print('worker index = {}'.format(worker_index))
    print('num classes = {}'.format(num_classes))
    print('num features = {}'.format(num_features))
    print('num epochs = {}'.format(num_epochs))
    print('learning rate = {}'.format(learning_rate))
    print("batch size = {}".format(batch_size))

    s3 = boto3.client('s3')
    feature_file_name = "features_{}_{}.npy".format(worker_index, num_workers)
    label_file_name = "labels_{}_{}.npy".format(worker_index, num_workers)

    # read file from s3
    s3.download_file(bucket, feature_file_name, local_dir + str(feature_file_name))
    features_matrix = np.load(local_dir + str(feature_file_name))
    print("read features matrix cost {} s".format(time.time() - start_time))
    print("feature matrix shape = {}, dtype = {}".format(features_matrix.shape, features_matrix.dtype))
    print("feature matrix sample = {}".format(features_matrix[0]))
    row_features = features_matrix.shape[0]
    col_features = features_matrix.shape[1]

    s3.download_file(bucket, label_file_name, local_dir + str(label_file_name))
    labels_matrix = np.load(local_dir + str(label_file_name))
    print("read label matrix cost {} s".format(time.time() - start_time))
    print("label matrix shape = {}, dtype = {}".format(labels_matrix.shape, labels_matrix.dtype))
    print("label matrix sample = {}".format(labels_matrix[0:10]))
    row_labels = labels_matrix.shape[0]

    if row_features != row_labels:
        raise AssertionError("row of feature matrix is {}, but row of label matrix is {}."
                             .format(row_features, row_labels))

    parse_start = time.time()
    dataset = DenseDatasetWithNP(col_features, features_matrix, labels_matrix)
    print("parse data cost {} s".format(time.time() - parse_start))

    preprocess_start = time.time()
    # Creating data indices for training and validation splits:
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_ratio * dataset_size))
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Creating PT data samplers and loaders:
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
    validation_loader = torch.utils.data.DataLoader(dataset,
                                                    batch_size=batch_size,
                                                    sampler=valid_sampler)
    print("preprocess data cost {} s, dataset size = {}"
          .format(time.time() - preprocess_start, dataset_size))

    model = LogisticRegression(num_features, num_classes)

    # Loss and Optimizer
    # Softmax is internally computed.
    # Set parameters to be updated.
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    # Training the Model
    train_start = time.time()
    for epoch in range(num_epochs):
        epoch_start = time.time()
        epoch_loss = 0
        for batch_index, (items, labels) in enumerate(train_loader):
            # print("------worker {} epoch {} batch {}------".format(worker_index, epoch, batch_index))
            batch_start = time.time()
            items = Variable(items.view(-1, num_features))
            labels = Variable(labels)

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = model(items)
            loss = criterion(outputs, labels)
            loss.backward()
            # print("forward and backward cost {} s".format(time.time() - batch_start))

            w_grad = model.linear.weight.grad.data.numpy()
            w_grad_shape = w_grad.shape
            b_grad = model.linear.bias.grad.data.numpy()
            b_grad_shape = b_grad.shape
            w_b_grad = np.concatenate((w_grad.flatten(), b_grad.flatten()))
            cal_time = time.time() - batch_start

            sync_start = time.time()
            postfix = "{}_{}".format(epoch, batch_index)
            w_b_grad_merge = reduce_batch(w_b_grad, tmp_bucket, merged_bucket,
                                          num_workers, worker_index, postfix)
            w_grad_merge = \
                w_b_grad_merge[:w_grad_shape[0] * w_grad_shape[1]].reshape(w_grad_shape) / float(num_workers)
            b_grad_merge = \
                w_b_grad_merge[w_grad_shape[0] * w_grad_shape[1]:].reshape(b_grad_shape[0]) / float(num_workers)
            model.linear.weight.grad = Variable(torch.from_numpy(w_grad_merge))
            model.linear.bias.grad = Variable(torch.from_numpy(b_grad_merge))
            sync_time = time.time() - sync_start

            optimizer.step()

            # Test the Model
            test_start = time.time()
            correct = 0
            total = 0
            test_loss = 0
            for items, labels in validation_loader:
                items = Variable(items.view(-1, num_features))
                labels = Variable(labels)
                outputs = model(items)
                test_loss += criterion(outputs, labels).data
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum()
            test_time = time.time() - test_start

            print('Epoch: [%d/%d], Step: [%d/%d], Time: %.4f, Loss: %.4f, epoch cost %.4f, '
                  'batch cost %.4f s: cal cost %.4f s communication cost %.4f s test cost %.4f s, '
                  'accuracy of the model on the %d test samples: %d %%, loss = %f'
                  % (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size,
                     time.time() - train_start, loss.data, time.time() - epoch_start,
                     time.time() - batch_start, cal_time, sync_time, test_time,
                     len(val_indices), 100 * correct / total, test_loss / total))

            if worker_index == 0:
                delete_expired_merged_batch(merged_bucket, epoch, batch_index)

        # Test the Model
        test_start = time.time()
        correct = 0
        total = 0
        test_loss = 0
        for items, labels in validation_loader:
            items = Variable(items.view(-1, num_features))
            labels = Variable(labels)
            outputs = model(items)
            test_loss += criterion(outputs, labels).data
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum()

        print('Epoch: %d, time = %.4f, accuracy of the model on the %d test samples: %d %%, loss = %f'
              % (epoch, time.time() - train_start, len(val_indices),
                 100 * correct / total, test_loss / total))

    if worker_index == 0:
        clear_bucket(merged_bucket)
        clear_bucket(tmp_bucket)

    end_time = time.time()
    print("Elapsed time = {} s".format(end_time - start_time))
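
# ---------------------------------------------------------------------------
# The handler above relies on a project helper, reduce_batch(w_b_grad,
# tmp_bucket, merged_bucket, num_workers, worker_index, postfix), whose
# definition is not shown here. The function below is only a minimal sketch
# of how such an S3-based reduce-and-broadcast could be built: the boto3
# calls (put_object, get_object, list_objects_v2) are real, but the helper
# name, key layout, and polling strategy are assumptions, not the project's
# actual implementation.
def reduce_batch_s3_sketch(vector, tmp_bucket, merged_bucket,
                           num_workers, worker_index, postfix):
    import io
    s3 = boto3.client('s3')

    # every worker uploads its flattened gradient under a per-batch prefix
    buf = io.BytesIO()
    np.save(buf, vector)
    s3.put_object(Bucket=tmp_bucket,
                  Key="{}_{}".format(postfix, worker_index),
                  Body=buf.getvalue())

    merged_key = "merged_{}".format(postfix)
    if worker_index == 0:
        # worker 0 waits for all shards, sums them, and publishes the result
        while True:
            resp = s3.list_objects_v2(Bucket=tmp_bucket, Prefix="{}_".format(postfix))
            if resp.get('KeyCount', 0) >= num_workers:
                break
            time.sleep(0.1)
        merged = np.zeros_like(vector)
        for obj in resp['Contents']:
            body = s3.get_object(Bucket=tmp_bucket, Key=obj['Key'])['Body'].read()
            merged += np.load(io.BytesIO(body))
        out = io.BytesIO()
        np.save(out, merged)
        s3.put_object(Bucket=merged_bucket, Key=merged_key, Body=out.getvalue())
        return merged

    # the other workers poll the merged bucket until worker 0 has published
    while True:
        try:
            body = s3.get_object(Bucket=merged_bucket, Key=merged_key)['Body'].read()
            return np.load(io.BytesIO(body))
        except s3.exceptions.NoSuchKey:
            time.sleep(0.1)
# ---------------------------------------------------------------------------
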
def handler(event, context):
    start_time = time.time()
    bucket = event['bucket_name']
    worker_index = event['rank']
    num_workers = event['num_workers']
    key = event['file']
    merged_bucket = event['merged_bucket']
    num_classes = event['num_classes']
    num_features = event['num_features']
    pos_tag = event['pos_tag']
    num_epochs = event['num_epochs']
    learning_rate = event['learning_rate']
    batch_size = event['batch_size']
    elasti_location = event['elasticache']
    endpoint = memcached_init(elasti_location)

    print('bucket = {}'.format(bucket))
    print("file = {}".format(key))
    print('merged bucket = {}'.format(merged_bucket))
    print('number of workers = {}'.format(num_workers))
    print('worker index = {}'.format(worker_index))
    print('num epochs = {}'.format(num_epochs))
    print('learning rate = {}'.format(learning_rate))
    print("batch size = {}".format(batch_size))

    # read file from s3
    file = get_object(bucket, key).read().decode('utf-8').split("\n")
    print("read data cost {} s".format(time.time() - start_time))

    parse_start = time.time()
    dataset = DenseLibsvmDataset(file, num_features, pos_tag)
    total_count = dataset.__len__()
    pos_count = 0
    for i in range(total_count):
        if dataset.__getitem__(i)[1] == 1:
            pos_count += 1
    print("{} positive observations out of {}".format(pos_count, total_count))
    print("parse data cost {} s".format(time.time() - parse_start))

    preprocess_start = time.time()
    # Creating data indices for training and validation splits:
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_ratio * dataset_size))
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # Creating PT data samplers and loaders:
    train_sampler = SubsetRandomSampler(train_indices)
    valid_sampler = SubsetRandomSampler(val_indices)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
    validation_loader = torch.utils.data.DataLoader(dataset,
                                                    batch_size=batch_size,
                                                    sampler=valid_sampler)
    print("preprocess data cost {} s".format(time.time() - preprocess_start))

    model = SVM(num_features, num_classes)

    # Loss and Optimizer
    # Softmax is internally computed.
    # Set parameters to be updated.
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    # Training the Model
    train_start = time.time()
    for epoch in range(num_epochs):
        epoch_start = time.time()
        epoch_loss = 0
        cal_time = 0
        sync_time = 0
        for batch_index, (items, labels) in enumerate(train_loader):
            batch_start = time.time()
            items = Variable(items.view(-1, num_features))
            labels = Variable(labels)

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = model(items)
            loss = criterion(outputs, labels)
            epoch_loss += loss.data
            loss.backward()

            # flatten the weight and bias gradients into one vector
            w_grad = model.linear.weight.grad.data.numpy()
            w_grad_shape = w_grad.shape
            b_grad = model.linear.bias.grad.data.numpy()
            b_grad_shape = b_grad.shape
            w_b_grad = np.concatenate((w_grad.flatten(), b_grad.flatten()))
            cal_time += time.time() - batch_start

            # synchronize the gradients through memcached and average them
            sync_start = time.time()
            postfix = "{}_{}".format(epoch, batch_index)
            w_b_grad_merge = reduce_batch(endpoint, w_b_grad, merged_bucket,
                                          num_workers, worker_index, postfix)
            w_grad_merge = \
                w_b_grad_merge[:w_grad_shape[0] * w_grad_shape[1]].reshape(w_grad_shape) / float(num_workers)
            b_grad_merge = \
                w_b_grad_merge[w_grad_shape[0] * w_grad_shape[1]:].reshape(b_grad_shape[0]) / float(num_workers)
            model.linear.weight.grad = Variable(torch.from_numpy(w_grad_merge))
            model.linear.bias.grad = Variable(torch.from_numpy(b_grad_merge))
            sync_time += time.time() - sync_start

            optimizer.step()

            # print('Epoch: [%d/%d], Step: [%d/%d], Time: %.4f, Loss: %.4f, epoch cost %.4f, '
            #       'batch cost %.4f s: cal cost %.4f s communication cost %.4f s'
            #       % (epoch + 1, num_epochs, batch_index, len(train_indices) / batch_size,
            #          time.time() - train_start, loss.data, time.time() - epoch_start,
            #          time.time() - batch_start, cal_time, sync_time))

        # Test the Model
        test_start = time.time()
        correct = 0
        total = 0
        test_loss = 0
        for items, labels in validation_loader:
            items = Variable(items.view(-1, num_features))
            labels = Variable(labels)
            outputs = model(items)
            test_loss += criterion(outputs, labels).data
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum()
        test_time = time.time() - test_start

        print('Epoch %d has %d batches, time = %.4f, epoch cost %.4f s: '
              'computation cost %.4f s communication cost %.4f s, '
              'train loss = %.4f, test cost %.4f s, '
              'accuracy of the model on the %d test samples: %d %%, loss = %f'
              % (epoch, batch_index, time.time() - train_start, time.time() - epoch_start,
                 cal_time, sync_time, epoch_loss, test_time,
                 len(val_indices), 100 * correct / total, test_loss / total))

    if worker_index == 0:
        clear_bucket(endpoint)

    end_time = time.time()
    print("Elapsed time = {} s".format(end_time - start_time))
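
# ---------------------------------------------------------------------------
# The ElastiCache-based handlers assume two project helpers that are not
# shown: memcached_init(location), which turns the 'elasticache' event field
# into a client endpoint, and reduce_batch(endpoint, data, merged_bucket,
# num_workers, worker_index, postfix), which aggregates through memcached
# instead of S3. The sketch below is only an illustration using the
# pymemcache library; it assumes the payload is a flat numpy vector, uses
# merged_bucket purely as a key prefix, and the key naming and polling logic
# are assumptions rather than the project's actual implementation.
def memcached_init_sketch(location, port=11211):
    from pymemcache.client.base import Client
    # 'location' is assumed to be the ElastiCache node hostname
    return Client((location, port))


def reduce_batch_memcached_sketch(client, vector, merged_bucket,
                                  num_workers, worker_index, postfix):
    # each worker stores its flattened gradient under a per-batch key
    client.set("{}_{}_{}".format(merged_bucket, postfix, worker_index),
               pickle.dumps(vector))

    merged_key = "{}_{}_merged".format(merged_bucket, postfix)
    if worker_index == 0:
        # worker 0 collects every shard, sums them, and publishes the result
        merged = np.zeros_like(vector)
        for w in range(num_workers):
            key = "{}_{}_{}".format(merged_bucket, postfix, w)
            value = client.get(key)
            while value is None:
                time.sleep(0.01)
                value = client.get(key)
            merged += pickle.loads(value)
        client.set(merged_key, pickle.dumps(merged))
        return merged

    # the other workers wait until worker 0 has published the merged value
    value = client.get(merged_key)
    while value is None:
        time.sleep(0.01)
        value = client.get(merged_key)
    return pickle.loads(value)
# ---------------------------------------------------------------------------
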
def train(epoch, net, trainloader, optimizer, device, worker_index, num_worker,
          endpoint, sync_mode, sync_step):
    # NOTE: merged_bucket is not a parameter of this function; it is expected
    # to be defined at module level.
    net.train()

    epoch_start = time.time()
    epoch_sync_time = 0
    num_batch = 0
    train_acc = Accuracy()
    train_loss = Average()

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        # print("------worker {} epoch {} batch {}------".format(worker_index, epoch + 1, batch_idx + 1))
        batch_start = time.time()
        inputs, targets = inputs.to(device), targets.to(device)

        outputs = net(inputs)
        loss = F.cross_entropy(outputs, targets)

        optimizer.zero_grad()
        loss.backward()
        # print("forward and backward cost {} s".format(time.time() - batch_start))

        if sync_mode == 'model_avg':
            # apply local gradient to local model
            optimizer.step()
            # average model
            if (batch_idx + 1) % sync_step == 0:
                sync_start = time.time()
                ################################# reduce_broadcast #################################
                print("starting model average")
                weights = [param.data.numpy() for param in net.parameters()]
                # print("[Worker {}] Weights before sync = {}".format(worker_index, weights[0][0]))
                postfix = "{}_{}".format(epoch, batch_idx)
                data = pickle.dumps(weights)
                merged_value = reduce_batch(endpoint, data, merged_bucket,
                                            num_worker, worker_index, postfix)
                # print("[Worker {}] Weights after sync = {}".format(worker_index, merged_value[0][0]))
                for layer_index, param in enumerate(net.parameters()):
                    param.data = torch.from_numpy(merged_value[layer_index])
                # print("synchronization cost {} s".format(time.time() - sync_start))
                epoch_sync_time += time.time() - sync_start

        if sync_mode == 'grad_avg':
            sync_start = time.time()
            ################################# scatter_reduce #################################
            # get gradients and flatten them into a 1-D array
            # gradients = [param.grad.data.numpy() for param in net.parameters()]
            # print("[Worker {}] Gradients before sync = {}".format(worker_index, gradients[0][0]))
            # param_dic = {}
            # for index, param in enumerate(net.parameters()):
            #     param_dic[index] = [param.grad.data.numpy().size, param.grad.data.numpy().shape]
            #     if index == 0:
            #         flattened_param = param.grad.data.numpy().flatten()
            #     else:
            #         flattened_param = np.concatenate((flattened_param, param.grad.data.numpy().flatten()))
            # comm_start = time.time()
            # # merge gradients
            # file_postfix = "{}_{}".format(epoch, batch_idx)
            # merged_value = scatter_reduce(flattened_param, tmp_bucket, merged_bucket,
            #                               num_worker, worker_index, file_postfix)
            # merged_value /= float(num_worker)
            # # print("scatter_reduce cost {} s".format(time.time() - comm_start))
            # # update the model gradients by layers
            # offset = 0
            # for layer_index, param in enumerate(net.parameters()):
            #     layer_size = param_dic[layer_index][0]
            #     layer_shape = param_dic[layer_index][1]
            #     layer_value = merged_value[offset: offset + layer_size].reshape(layer_shape)
            #     param.grad.data = torch.from_numpy(layer_value)
            #     offset += layer_size
            # if worker_index == 0:
            #     delete_expired_merged(merged_bucket, epoch, batch_idx)
            ################################# scatter_reduce #################################

            ################################# reduce_broadcast #################################
            gradients = [param.grad.data.numpy() for param in net.parameters()]
            # print("[Worker {}] Gradients before sync = {}".format(worker_index, gradients[0][0]))
            postfix = "{}_{}".format(epoch, batch_idx)
            data = pickle.dumps(gradients)
            merged_value = reduce_batch(endpoint, data, merged_bucket,
                                        num_worker, worker_index, postfix)
            # print("[Worker {}] Gradients after sync = {}".format(worker_index, merged_value[0][0]))
            for layer_index, param in enumerate(net.parameters()):
                param.grad.data = torch.from_numpy(merged_value[layer_index])
            # print("synchronization cost {} s".format(time.time() - sync_start))
            ################################# reduce_broadcast #################################
            epoch_sync_time += time.time() - sync_start

            optimizer.step()

        if sync_mode == 'cen':
            optimizer.step()

        train_acc.update(outputs, targets)
        train_loss.update(loss.item(), inputs.size(0))

        if num_batch % 10 == 0:
            print("Epoch {} Batch {} training Loss:{}, Acc:{}".format(
                epoch + 1, num_batch, train_loss, train_acc))
        num_batch += 1

    epoch_time = time.time() - epoch_start
    print("Epoch {} has {} batches, time = {} s, sync time = {} s, cal time = {} s"
          .format(epoch + 1, num_batch, epoch_time, epoch_sync_time, epoch_time - epoch_sync_time))

    return train_loss, train_acc
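
# ---------------------------------------------------------------------------
# The train() loop above uses Accuracy and Average meters that are defined
# elsewhere in the project. The classes below are only a minimal sketch of
# what such meters could look like, matching the call sites
# train_acc.update(outputs, targets) and train_loss.update(loss.item(),
# inputs.size(0)); the exact fields and string formats are assumptions.
class Average(object):
    """Running average of a scalar (e.g. the training loss)."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, value, number):
        self.sum += value * number
        self.count += number

    @property
    def average(self):
        return self.sum / self.count if self.count > 0 else 0.0

    def __str__(self):
        return "{:.4f}".format(self.average)


class Accuracy(object):
    """Running top-1 accuracy over mini-batches of logits and targets."""

    def __init__(self):
        self.correct = 0
        self.count = 0

    def update(self, outputs, targets):
        with torch.no_grad():
            predicted = outputs.argmax(dim=1)
            self.correct += (predicted == targets).sum().item()
            self.count += targets.size(0)

    @property
    def accuracy(self):
        return self.correct / self.count if self.count > 0 else 0.0

    def __str__(self):
        return "{:.2f}%".format(100.0 * self.accuracy)
# ---------------------------------------------------------------------------
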
def handler(event, context):
    try:
        start_time = time.time()
        bucket_name = event['bucket_name']
        worker_index = event['rank']
        num_workers = event['num_workers']
        key = event['file']
        merged_bucket = event['merged_bucket']
        num_features = event['num_features']
        learning_rate = event["learning_rate"]
        batch_size = event["batch_size"]
        num_epochs = event["num_epochs"]
        validation_ratio = event["validation_ratio"]
        elasti_location = event['elasticache']
        endpoint = memcached_init(elasti_location)

        # read file from s3
        file = get_object(bucket_name, key).read().decode('utf-8').split("\n")
        print("read data cost {} s".format(time.time() - start_time))

        parse_start = time.time()
        dataset = SparseDatasetWithLines(file, num_features)
        print("parse data cost {} s".format(time.time() - parse_start))

        preprocess_start = time.time()
        dataset_size = len(dataset)
        indices = list(range(dataset_size))
        split = int(np.floor(validation_ratio * dataset_size))
        if shuffle_dataset:
            np.random.seed(random_seed)
            np.random.shuffle(indices)
        train_indices, val_indices = indices[split:], indices[:split]

        train_set = [dataset[i] for i in train_indices]
        val_set = [dataset[i] for i in val_indices]
        print("preprocess data cost {} s".format(time.time() - preprocess_start))

        lr = LogisticRegression(train_set, val_set, num_features, num_epochs, learning_rate, batch_size)

        # Training the Model
        train_start = time.time()
        for epoch in range(num_epochs):
            epoch_start = time.time()
            num_batches = math.floor(len(train_set) / batch_size)
            print(f"worker {worker_index} epoch {epoch}")
            for batch_idx in range(num_batches):
                batch_start = time.time()
                batch_ins, batch_label = lr.next_batch(batch_idx)
                batch_grad = torch.zeros(lr.n_input, 1, requires_grad=False)
                batch_bias = 0.0
                train_loss = Loss()
                train_acc = Accuracy()

                for i in range(len(batch_ins)):
                    z = lr.forward(batch_ins[i])
                    h = lr.sigmoid(z)
                    loss = lr.loss(h, batch_label[i])
                    # print("z = {}, h = {}, loss = {}".format(z, h, loss))
                    train_loss.update(loss, 1)
                    train_acc.update(h, batch_label[i])
                    g = lr.backward(batch_ins[i], h.item(), batch_label[i])
                    batch_grad.add_(g)
                    batch_bias += np.sum(h.item() - batch_label[i])

                batch_grad = batch_grad.div(len(batch_ins))
                batch_bias = batch_bias / len(batch_ins)
                batch_grad.mul_(-1.0 * learning_rate)
                lr.grad.add_(batch_grad)
                lr.bias = lr.bias - batch_bias * learning_rate

                # synchronize the gradient and bias through memcached
                sync_start = time.time()
                np_grad = lr.grad.numpy().flatten()
                np_bias = np.array([lr.bias], dtype=np_grad.dtype)
                w_and_b = np.concatenate((np_grad, np_bias))
                postfix = "{}_{}".format(epoch, batch_idx)
                w_b_merge = reduce_batch(endpoint, w_and_b, merged_bucket,
                                         num_workers, worker_index, postfix)
                # keep the merged gradient as a torch tensor so the next batch
                # can still call add_()/numpy() on it
                lr.grad = torch.from_numpy(w_b_merge[:-1].reshape(num_features, 1)) / float(num_workers)
                lr.bias = float(w_b_merge[-1]) / float(num_workers)
                sync_time = time.time() - sync_start
                print("synchronization cost {}s, batch takes {}s".format(
                    sync_time, time.time() - batch_start))

                if (batch_idx + 1) % 10 == 0:
                    print("Epoch: {}/{}, Step: {}/{}, Loss: {}".format(
                        epoch + 1, num_epochs, batch_idx + 1, num_batches, train_loss))

            cal_time = time.time() - epoch_start

            test_start = time.time()
            val_loss, val_acc = lr.evaluate()
            test_time = time.time() - test_start

            print('Epoch: [%d/%d], Step: [%d/%d], Time: %.4f, Loss: %s, Accuracy: %s, epoch cost %.4f, '
                  'cal cost %.4f s, sync cost %.4f s, test cost %.4f s, '
                  'test accuracy: %s %%, test loss: %s'
                  % (epoch + 1, num_epochs, batch_idx + 1, num_batches,
                     time.time() - train_start, train_loss, train_acc, time.time() - epoch_start,
                     cal_time, sync_time, test_time, val_acc, val_loss))

        if worker_index == 0:
            clear_bucket(endpoint)

        print("elapsed time = {} s".format(time.time() - start_time))

    except Exception as e:
        print("Error {}".format(e))
def handler(event, context):
    try:
        start_time = time.time()
        bucket_name = event['bucket_name']
        worker_index = event['rank']
        num_workers = event['num_workers']
        key = event['file']
        merged_bucket = event['merged_bucket']
        num_features = event['num_features']
        learning_rate = event["learning_rate"]
        batch_size = event["batch_size"]
        num_epochs = event["num_epochs"]
        validation_ratio = event["validation_ratio"]
        elasti_location = event['elasticache']
        endpoint = memcached_init(elasti_location)

        # Reading data from S3
        print(f"Reading training data from bucket = {bucket_name}, key = {key}")
        file = get_object(bucket_name, key).read().decode('utf-8').split("\n")
        print("read data cost {} s".format(time.time() - start_time))

        parse_start = time.time()
        dataset = SparseDatasetWithLines(file, num_features)
        print("parse data cost {} s".format(time.time() - parse_start))

        preprocess_start = time.time()
        dataset_size = len(dataset)
        indices = list(range(dataset_size))
        split = int(np.floor(validation_ratio * dataset_size))
        if shuffle_dataset:
            np.random.seed(random_seed)
            np.random.shuffle(indices)
        train_indices, val_indices = indices[split:], indices[:split]

        train_set = [dataset[i] for i in train_indices]
        val_set = [dataset[i] for i in val_indices]
        print("preprocess data cost {} s".format(time.time() - preprocess_start))

        svm = SparseSVM(train_set, val_set, num_features, num_epochs, learning_rate, batch_size)

        # Training the Model
        for epoch in range(num_epochs):
            epoch_start = time.time()
            num_batches = math.floor(len(train_set) / batch_size)
            print("worker {} epoch {}".format(worker_index, epoch))
            for batch_idx in range(num_batches):
                batch_start = time.time()
                batch_ins, batch_label = svm.next_batch(batch_idx)
                acc = svm.one_epoch(batch_idx, epoch)
                cal_time = time.time() - batch_start

                # synchronize the weights through memcached
                sync_start = time.time()
                np_w = svm.weights.numpy().flatten()
                postfix = "{}_{}".format(epoch, batch_idx)
                w_merge = reduce_batch(endpoint, np_w, merged_bucket,
                                       num_workers, worker_index, postfix)
                svm.weights = torch.from_numpy(w_merge).reshape(num_features, 1)
                sync_time = time.time() - sync_start
                print("computation takes {}s, synchronization cost {}s, batch takes {}s"
                      .format(cal_time, sync_time, time.time() - batch_start))

                if (batch_idx + 1) % 10 == 0:
                    print("Epoch: {}/{}, Step: {}/{}, train acc: {}"
                          .format(epoch + 1, num_epochs, batch_idx + 1, num_batches, acc))

            val_acc = svm.evaluate()
            print("Epoch takes {}s, validation accuracy: {}"
                  .format(time.time() - epoch_start, val_acc))

        if worker_index == 0:
            clear_bucket(endpoint)

        print("elapsed time = {} s".format(time.time() - start_time))

    except Exception as e:
        print("Error {}".format(e))
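
# ---------------------------------------------------------------------------
# All of the handlers above are driven by the Lambda invocation event. The
# dictionary below is a hypothetical example payload: the bucket names, the
# ElastiCache endpoint, and the hyper-parameter values are placeholders, not
# values used by the project. It only illustrates which fields the handlers
# read from `event`; some fields are consumed by only a subset of them.
example_event = {
    "bucket_name": "training-data-bucket",      # S3 bucket holding this worker's input
    "file": "part_0.libsvm",                    # input object key for this worker
    "rank": 0,                                  # index of this worker
    "num_workers": 4,                           # total number of Lambda workers
    "tmp_bucket": "tmp-grad-bucket",            # S3-based handler only
    "merged_bucket": "merged-grad-bucket",      # where merged values are published
    "elasticache": "my-cluster.cfg.use1.cache.amazonaws.com",  # memcached handlers only
    "num_classes": 2,
    "num_features": 30,
    "pos_tag": 1,                               # dense LIBSVM handler only
    "num_epochs": 10,
    "learning_rate": 0.01,
    "batch_size": 100,
    "validation_ratio": 0.2,                    # sparse handlers only
}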