Example No. 1
def select(id):
    runner = None
    sql = "SELECT * FROM runners WHERE id = %s"
    values = [id]
    results = run_sql(sql, values)
    if results:  # only build a Runner if the query returned a row
        result = results[0]
        runner = Runner(result['first_name'], result['last_name'], result['id'])
    return runner
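These repository examples all call a run_sql helper that is not included here. A minimal sketch of one possible implementation, assuming a PostgreSQL database accessed with psycopg2 and rows returned as dictionaries (the database name runners_db is a placeholder):

import psycopg2
import psycopg2.extras as ext

def run_sql(sql, values=None):
    # Execute a statement and return any rows as dict-like records,
    # so callers can index results by column name.
    conn = None
    results = []
    try:
        conn = psycopg2.connect("dbname='runners_db'")  # placeholder database name
        cursor = conn.cursor(cursor_factory=ext.RealDictCursor)
        cursor.execute(sql, values)
        conn.commit()
        if cursor.description is not None:  # only statements that return rows
            results = cursor.fetchall()
        cursor.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return results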
Example No. 2
def select_all():
    runners = []

    sql = "SELECT * from runners"
    results = run_sql(sql)

    for row in results:
        runner = Runner(row['first_name'], row['last_name'], row['id'])
        runners.append(runner)
    return runners
Example No. 3
def select_runners_by_race(id):
    # Select the runners who took part in the given race: inner join
    # race_results on runner_id, filter by race_id, order by finish time.
    sql = "SELECT runners.*, race_results.time FROM runners INNER JOIN race_results ON race_results.runner_id = runners.id WHERE race_results.race_id = %s ORDER BY race_results.time"
    values = [id]
    results = run_sql(sql, values)

    # Build a Runner object for each row that comes back.
    runners = []
    for result in results:
        runner = Runner(result['first_name'], result['last_name'], result['id'])
        runners.append(runner)
    return runners
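The join above also selects race_results.time, but the loop discards it because Runner only stores a name and an id. If the finish times are wanted as well, a hedged variant (the function name and the (runner, time) pairing are not from the original) could look like this:

def select_runners_with_times_by_race(id):
    # Hypothetical variant: keep each runner's finish time alongside the Runner object.
    sql = "SELECT runners.*, race_results.time FROM runners INNER JOIN race_results ON race_results.runner_id = runners.id WHERE race_results.race_id = %s ORDER BY race_results.time"
    values = [id]
    results = run_sql(sql, values)
    return [
        (Runner(row['first_name'], row['last_name'], row['id']), row['time'])
        for row in results
    ]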
Example No. 4
    start_time = time.time()

    opt, logger = TrainOptions().parse()
    writer = SummaryWriter(osp.join(opt.backup, 'visual'))

    # Create the datasets given opt.dataset_mode and other options.
    src_train_loader, src_val_loader, tgt_train_loader, tgt_val_loader = create_dataset(opt)
    torch.backends.cudnn.benchmark = True

    ##################################
    # Initializing the Model
    ##################################
    model, discriminator = models.create_model(opt)

    criterion = nn.CrossEntropyLoss().cuda()
    runner = Runner(opt, model, discriminator, criterion, writer)

    # Resume from the most recent checkpoint.
    model.load_state_dict(torch.load('/home/liguangrui/domain_adaptation_base_code/checkpoints/baselineV9+vada+noNoise/latest.pth'))

    ##################################
    # Start Training
    ##################################
    # train_loader = JointLoader(src_train_loader, tgt_train_loader)
Example No. 5
    start_time = time.time()

    opt, logger = TrainOptions().parse()
    writer = SummaryWriter(osp.join(opt.backup, 'visual'))

    # Create the datasets given opt.dataset_mode and other options.
    src_train_loader, src_val_loader, tgt_train_loader, tgt_val_loader = create_dataset(opt)
    torch.backends.cudnn.benchmark = True

    ##################################
    # Initializing the Model
    ##################################
    model, discriminator = models.create_model(opt)

    criterion = nn.CrossEntropyLoss().cuda()
    runner = Runner(opt, model, discriminator, criterion, writer)

    ##################################
    # Start Training
    ##################################
    train_loader = JointLoader(src_train_loader, tgt_train_loader)

    for epoch in range(1, opt.epoch_count + 1):
        runner.train(epoch, train_loader, logger)
        if epoch % opt.validate_freq == 0 and opt.val:
            # runner.validate(epoch, src_val_loader, logger)
            runner.validate2(epoch, tgt_val_loader, logger)
        # runner.visualize(src_val_loader, tgt_val_loader, epoch)

        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d' % epoch)
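The log line above is where this snippet stops; the actual checkpoint write is not shown. One way the save step could look, assuming the weights go next to the run's other outputs under opt.backup (the file names are assumptions):

        if epoch % opt.save_epoch_freq == 0:
            print('saving the model at the end of epoch %d' % epoch)
            # Hypothetical save step: keep a per-epoch copy plus a rolling "latest".
            torch.save(model.state_dict(), osp.join(opt.backup, 'epoch_%d.pth' % epoch))
            torch.save(model.state_dict(), osp.join(opt.backup, 'latest.pth'))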
Example No. 6
def payloadExec(armInfo):
    cmd = armInfo['cmd']
    shell = armInfo['shell']
    runner = Runner(shell)
    return runner.execute(cmd)
Example No. 7
from models.race_result import Race_result
import repositories.race_result_repository as race_result_repository

from models.runner import Runner
import repositories.runner_repository as runner_repository

from models.race import Race
import repositories.race_repository as race_repository

race_result_repository.delete_all()
runner_repository.delete_all()
race_repository.delete_all()

runner_1 = Runner("Steven", "McFarlane")
runner_repository.save(runner_1)
runner_2 = Runner("Joanna", "McFarlane")
runner_repository.save(runner_2)
runner_3 = Runner("Vito", "Corleone")
runner_repository.save(runner_3)
runner_4 = Runner("Malcolm", "Tucker")
runner_repository.save(runner_4)
runner_5 = Runner("Jimmy", "Garroppolo")
runner_repository.save(runner_5)
runner_6 = Runner("Eddie", "Vedder")
runner_repository.save(runner_6)
runner_7 = Runner("Sabrina", "Pace")
runner_repository.save(runner_7)
runner_8 = Runner("Jasmine", "Paris")
runner_repository.save(runner_8)
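The seed script builds Runner objects from just a first and last name, while the repository functions in the earlier examples also pass back an id read from the database. A minimal Runner model consistent with both usages, assuming the id is assigned on save and therefore optional (the full_name helper is illustrative):

class Runner:
    def __init__(self, first_name, last_name, id=None):
        self.first_name = first_name
        self.last_name = last_name
        self.id = id  # filled in once the row exists in the database

    def full_name(self):
        # Illustrative convenience helper; not required by the code above.
        return f"{self.first_name} {self.last_name}"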
Example No. 8
        continue
    if model_type != 0:
        try:
            epochs = int(
                input("Select number of epochs:\r\n0. 1\r\n1. 25\r\n2. "
                      "50\r\n3. 100\r\n4. 250\r\n4. 500\r\n"))
            if not (0 <= epochs <= 4):
                raise Exception
            epochs_num = 0
            if epochs == 0:
                epochs_num = 1
            elif epochs == 1:
                epochs_num = 25
            elif epochs == 2:
                epochs_num = 50
            elif epochs == 3:
                epochs_num = 100
            elif epochs == 4:
                epochs_num = 250
            elif epochs == 5:
                epochs_num = 500
        except Exception:
            print("\r\nInvalid choice!\r\n")
            continue
    break

if model_type == 0:
    Classic.run(run_type, dataset)
else:
    Runner.run(run_type, dataset, model_type, epochs_num)
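The if/elif ladder that maps the menu index to an epoch count can be collapsed into a lookup table, and the membership test then replaces the explicit range check. A sketch of that idea as a standalone helper (the name choose_epochs is not from the original):

def choose_epochs():
    # Menu index -> epoch count; dict membership doubles as the range check.
    epoch_options = {0: 1, 1: 25, 2: 50, 3: 100, 4: 250, 5: 500}
    choice = int(input("Select number of epochs:\r\n0. 1\r\n1. 25\r\n2. 50\r\n"
                       "3. 100\r\n4. 250\r\n5. 500\r\n"))
    if choice not in epoch_options:
        raise ValueError("Invalid choice!")
    return epoch_options[choice]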
Example No. 9
                    level=logging.INFO,
                    format='%(asctime)-15s %(message)s')
logging.info(pformat(params.dict))

os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
np.random.seed(args.seed)
tf.set_random_seed(args.seed)

# data
trainset = get_dataset(params, 'train')
validset = get_dataset(params, 'valid')
testset = get_dataset(params, 'test')
logging.info((f"trainset: {trainset.size}", f"validset: {validset.size}",
              f"testset: {testset.size}"))

# model
model = get_model(params)
runner = Runner(params, model)
runner.set_dataset(trainset, validset, testset)

# run
if args.mode == 'train':
    runner.run()
elif args.mode == 'resume':
    model.load()
    runner.run()
elif args.mode == 'test':
    runner.evaluate()
else:
    raise ValueError(f"unknown mode: {args.mode}")
Example No. 10
def update_runner(id):
    first_name = request.form["first_name"]
    last_name = request.form["last_name"]
    new_runner = Runner(first_name, last_name, id)
    runner_repository.update(new_runner)
    return redirect("/runners")
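update_runner hands the modified Runner to runner_repository.update, which is not shown here. A minimal sketch of what it might look like, consistent with the run_sql helper assumed after Example No. 1:

def update(runner):
    # Hypothetical repository function: write the edited names back to the runner's row.
    sql = "UPDATE runners SET (first_name, last_name) = (%s, %s) WHERE id = %s"
    values = [runner.first_name, runner.last_name, runner.id]
    run_sql(sql, values)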