def run_experiment(data_path, glove_path):
    log_level = get_log_level()
    if not log_level:
        log_level = logging.INFO
    logging.basicConfig(level=log_level)
    logger.info("Starting experiment")
    experiment = Experiment()

    logger.info("Loading data")
    dpl = Datapipeline(data_path=data_path)
    dpl.transform()
    train, val = dpl.split_data()
    logger.info("Data loaded")

    # build and train the generator on the training split
    model = twitter_model(glove_path=glove_path)
    model.build_model(train.values)
    model.get_train_data(train.values)
    output_model = model.train()

    # save the trained model into the experiment's outputs directory
    filepath = os.path.join(get_outputs_path(), "trump_bot.h5")
    output_model.save(filepath)
    logger.info("Experiment completed")
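# Usage sketch (not from the source): wire run_experiment to a CLI. The
# flag names --data-path/--glove-path are assumptions; adjust to however the
# surrounding project passes its paths in.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Train the tweet generator')
    parser.add_argument('--data-path', type=str, required=True,
                        help='directory containing the training tweets')
    parser.add_argument('--glove-path', type=str, required=True,
                        help='path to the GloVe embeddings file')
    cli_args = parser.parse_args()
    run_experiment(data_path=cli_args.data_path, glove_path=cli_args.glove_path)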
def run_experiment(model, params):
    try:
        log_level = get_log_level()
        if not log_level:
            log_level = logging.INFO
        logging.basicConfig(level=log_level)
        logger.info("Starting experiment")
        experiment = Experiment()

        # train and log the returned metrics dict against the experiment
        metrics = model.train(params)
        experiment.log_metrics(**metrics)
        logger.info("Experiment completed")
    except Exception as e:
        logger.error(f"Experiment failed: {e}")
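# Sketch of the contract run_experiment expects: model.train(params) returns
# a flat dict of metric name -> value, which log_metrics(**metrics) expands
# into keyword arguments. DummyModel and the numbers are placeholders.
class DummyModel:
    def train(self, params):
        # a real model would fit itself here and report real metrics
        return {'loss': 0.42, 'accuracy': 0.91}

# run_experiment(DummyModel(), {'epochs': 5, 'lr': 0.01})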
def run_experiment(data_path, model_name, params):
    try:
        log_level = get_log_level()
        if not log_level:
            log_level = logging.INFO
        logging.basicConfig(level=log_level)
        logger.info("Starting experiment")
        experiment = Experiment()

        # instantiate the model class
        model = Model(model_name)
        logger.info(f'{model_name} ok')

        # load the reference data
        refs = model.get_data(data_path, **params)
        logger.info('data ok')

        # train the underlying model
        model.model.train()
        logger.info('model trained')

        # predict on each reference example
        preds = refs.apply(lambda x: model.model.predict(x))
        logger.info('preds ok')

        # evaluate predictions against the references
        precision, recall, f1 = model.model.eval(preds, refs)
        logger.info('eval ok')
        logger.info(f'Precision: {precision}, Recall: {recall}, F1: {f1}')

        experiment.log_metrics(precision=precision, recall=recall, f1=f1)
        logger.info("Experiment completed")
    except Exception as e:
        logger.error(f"Experiment failed: {e}")
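# Hypothetical skeleton of the wrapper assumed above, inferred from how
# run_experiment uses it (a sketch only; names and return types are guesses,
# and ModelInterface is a stand-in for the project's actual Model class):
class ModelInterface:
    def __init__(self, model_name):
        # self.model: underlying estimator exposing train(), predict(x),
        # and eval(preds, refs) -> (precision, recall, f1)
        self.model = None

    def get_data(self, data_path, **params):
        """Return a pandas Series of reference examples."""
        raise NotImplementedError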
def setup_logging():
    log_level = get_log_level()
    if log_level is None:
        log_level = logging.INFO
    logging.basicConfig(level=log_level)
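# Hypothetical usage helper (a sketch, not from the source): configure the
# root logger once at process start, before the tracking client or any
# library touches it, then open the run.
def start_experiment():
    setup_logging()
    return Experiment()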
def main():
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for training (default: 1000)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=15, metavar='N',
                        help='number of epochs to train (default: 15)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=42, metavar='S',
                        help='random seed (default: 42)')
    args = parser.parse_args()

    experiment = Experiment()
    logger = logging.getLogger('main')
    logger.setLevel(get_log_level() or logging.INFO)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    logger.info('%s', device)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('.', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('.', train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    model = Net().to(device)
    model_path = os.path.join(get_outputs_path(), 'model.p')
    state_path = os.path.join(get_outputs_path(), 'state.json')

    # resume from a previous run if a checkpoint exists
    start = 1
    if os.path.isfile(model_path):
        model.load_state_dict(torch.load(model_path))
        logger.info('%s', 'Model Loaded')
    if os.path.isfile(state_path):
        with open(state_path, 'r') as f:
            data = json.load(f)
        start = data['epoch'] + 1  # the saved epoch already completed
        logger.info('%s', 'State Loaded')

    optimizer = optim.SGD(model.parameters(), lr=args.lr)

    with SummaryWriter(log_dir=get_outputs_path()) as writer:
        for epoch in range(start, args.epochs + 1):
            train(epoch, writer, experiment, args, model, device, train_loader, optimizer)
            test(epoch, writer, experiment, args, model, device, test_loader)
            # checkpoint the weights and the last completed epoch
            torch.save(model.state_dict(), model_path)
            with open(state_path, 'w') as f:
                json.dump({'epoch': epoch}, f)
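# main() calls train()/test() helpers that are not shown here. A minimal
# sketch of train() matching the call signature above, assuming Net returns
# log-probabilities (log_softmax) as in the canonical PyTorch MNIST example;
# the metric name train_loss is an assumption.
import torch.nn.functional as F

def train(epoch, writer, experiment, args, model, device, train_loader, optimizer):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()
    # log the final batch loss of the epoch to TensorBoard and the experiment
    writer.add_scalar('train/loss', loss.item(), epoch)
    experiment.log_metrics(train_loss=loss.item())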
def set_logging(log_level=None):
    if log_level == 'INFO':
        log_level = tf.logging.INFO
    elif log_level == 'DEBUG':
        log_level = tf.logging.DEBUG
    elif log_level == 'WARN':
        log_level = tf.logging.WARN
    else:
        log_level = tf.logging.INFO  # default for unset or unrecognized names
    tf.logging.set_verbosity(log_level)

set_logging(get_log_level())
experiment = Experiment()

# resolve input and output locations from the experiment environment
vm_paths = list(get_data_paths().values())[0]
data_paths = "{}/SSD/tfrecords".format(vm_paths)
checkpointpath = "{}/SSD.checkpoints/ssd_300_vgg.ckpt".format(vm_paths)
TRAIN_DIR = get_outputs_path()

slim = tf.contrib.slim
DATA_FORMAT = 'NHWC'  # batch, height, width, channels

# =========================================================================== #
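# An equivalent, table-driven formulation of set_logging (a sketch for
# comparison, not part of the original pipeline). Note that tf.logging is the
# TF 1.x API; under TF 2.x the same calls live at tf.compat.v1.logging.
_TF_LOG_LEVELS = {
    'DEBUG': tf.logging.DEBUG,
    'INFO': tf.logging.INFO,
    'WARN': tf.logging.WARN,
}

def set_logging_compact(log_level=None):
    # unknown or unset level names fall back to INFO
    tf.logging.set_verbosity(_TF_LOG_LEVELS.get(log_level, tf.logging.INFO))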