Example #1
from test import test_function


# Define calculate_shipping_cost() here:
def calculate_shipping_cost(from_coords, to_coords, shipping_type='Overnight'):
    from_lat, from_long = from_coords
    to_lat, to_long = to_coords
    distance = get_distance(from_lat, from_long, to_lat, to_long)
    shipping_rate = SHIPPING_PRICES[shipping_type]
    price = distance * shipping_rate
    return format_price(price)


# Test the function by calling
# test_function(calculate_shipping_cost)
test_function(calculate_shipping_cost)
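# calculate_shipping_cost() relies on SHIPPING_PRICES, get_distance() and
# format_price(), which are defined elsewhere in the project (nile.py).  The
# stand-ins below are a purely hypothetical sketch, with assumed rates and
# signatures, so the function can be exercised on its own.
SHIPPING_PRICES = {'Ground': 1.0, 'Premium Ground': 1.6, 'Overnight': 2.5}  # assumed rates

def get_distance(from_lat, from_long, to_lat, to_long):
    # crude straight-line distance, only so there is something to multiply
    return ((from_lat - to_lat) ** 2 + (from_long - to_long) ** 2) ** 0.5

def format_price(price):
    return '${0:,.2f}'.format(price)

# example call: shipping_type falls back to the 'Overnight' default
print(calculate_shipping_cost((30.0, -90.0), (32.0, -92.0)))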


# Define calculate_driver_cost() here
def calculate_driver_cost(distance, *drivers):
    cheapest_driver = None
    cheapest_driver_price = None

    for driver in drivers:
        driver_time = driver.speed * distance
        price_for_driver = driver.salary * driver_time
        if cheapest_driver is None:
            cheapest_driver = driver
            cheapest_driver_price = price_for_driver
        elif price_for_driver < cheapest_driver_price:
            cheapest_driver = driver
            cheapest_driver_price = price_for_driver
    return cheapest_driver_price, cheapest_driver


def main():
    global epochs_since_improvement, start_epoch, best_loss, epoch, checkpoint

    train_data = Dataset(root=train_path,
                         txt_path=train_1_txt,
                         class_to_idx=class_to_idx,
                         transforms=data_transforms)
    train_data, valid_data, test_data = torch.utils.data.random_split(
        train_data, [600, 116, 150])
    print('train_data_size: ', len(train_data))
    print('valid_data_size: ', len(valid_data))
    print('test_data_size: ', len(test_data))

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               num_workers=2,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=batch_size,
                                              num_workers=1,
                                              shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=batch_size,
                                               num_workers=1,
                                               shuffle=True)
    # fine-tuning
    # the classifier head below and the checkpoint filename both target resnet50,
    # so resnet50 is kept as the active backbone here
    model = models.resnet50(pretrained=True)
    #model = models.densenet121(pretrained=True)

    for param in model.parameters():
        param.requires_grad = True

    #for resnet50
    model.fc = nn.Sequential(
        OrderedDict([('fcl1', nn.Linear(2048, 1024)), ('dp1', nn.Dropout(0.3)),
                     ('r1', nn.ReLU()), ('fcl2', nn.Linear(1024, 128)),
                     ('dp2', nn.Dropout(0.3)), ('r2', nn.ReLU()),
                     ('fcl3', nn.Linear(128, 5)),
                     # no Softmax here: nn.CrossEntropyLoss below expects raw
                     # logits and applies log-softmax internally
                     #('out', nn.Softmax(dim=1)),
                     ]))
    '''
    #for densenet121
    model.classifier = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(1024, 256)),
        ('dp1', nn.Dropout(0.3)),
        ('r1', nn.ReLU()),
        ('fc2', nn.Linear(256, 32)),
        ('dcp2', nn.Dropout(0.3)),
        ('r2', nn.ReLU()),
        ('fc3', nn.Linear(32, 5)),
        ('out', nn.Softmax(dim=1))
        ]))
    '''
    train_on_gpu = torch.cuda.is_available()
    if train_on_gpu:
        print('GPU is available :)  Training on GPU ...')
    else:
        print('GPU is not available :(  Training on CPU ...')

    # after the first training run a checkpoint file exists and is loaded below;
    # on the very first run there is none yet, so fall back to a fresh optimizer
    checkpoint_path = '/home/tianshu/bladder-cancer/code/checkpoint_resnet50.pth.tar'
    try:
        checkpoint = torch.load(checkpoint_path)
    except FileNotFoundError:
        checkpoint = None
    if checkpoint is None:
        optimizer = optim.Adam(model.parameters())
    else:
        # restore training state from the loaded checkpoint
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epoch_since_improvement']
        best_loss = checkpoint['best_loss']
        print(
            '\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' %
            (start_epoch, best_loss))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    from torch.optim.lr_scheduler import StepLR
    criterion = nn.CrossEntropyLoss()
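    # StepLR below multiplies the learning rate by gamma (0.9) every step_size
    # (10) epochs, assuming train_function calls scheduler.step() once per epoch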
    scheduler = StepLR(optimizer, step_size=10, gamma=0.9)
    #train the model
    for epoch in range(start_epoch, epochs):
        val_loss = train_function(model,
                                  train_loader,
                                  valid_loader,
                                  criterion=criterion,
                                  optimizer=optimizer,
                                  train_on_gpu=train_on_gpu,
                                  epoch=epoch,
                                  device=device,
                                  scheduler=scheduler)

        # Did validation loss improve?
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))

        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer,
                        val_loss, best_loss, is_best)

    test_function(model, test_loader, device, criterion, cat_to_name)


# Test the function by calling
# test_function(calculate_driver_cost)
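# Usage sketch (illustrative, not part of the exercise): because the signature is
# calculate_driver_cost(distance, *drivers), any number of driver objects can be
# passed and they arrive packed in the `drivers` tuple.  Driver below is a
# hypothetical stand-in exposing just the two attributes the function reads.
from collections import namedtuple

Driver = namedtuple('Driver', ['speed', 'salary'])
price, driver = calculate_driver_cost(80,
                                      Driver(speed=2, salary=30),
                                      Driver(speed=7, salary=20))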


# Define calculate_money_made() here
def calculate_money_made(**trips):
    total_money_made = 0
    for trip in trips.values():
        trip_revenue = (trip.cost - trip.driver.cost)
        total_money_made += trip_revenue
    return total_money_made


# Test the function by calling
test_function(calculate_money_made)
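# Usage sketch (illustrative): calculate_money_made(**trips) collects its keyword
# arguments into the `trips` dict, so each trip is passed as name=trip_object.
# Trip and TripDriver are hypothetical stand-ins with just the attributes the
# function reads (trip.cost and trip.driver.cost).
from collections import namedtuple

TripDriver = namedtuple('TripDriver', ['cost'])
Trip = namedtuple('Trip', ['cost', 'driver'])
uk_trip = Trip(cost=100, driver=TripDriver(cost=30))
la_trip = Trip(cost=250, driver=TripDriver(cost=75))
print(calculate_money_made(UK=uk_trip, LA=la_trip))  # (100 - 30) + (250 - 75) = 245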
Example #4
from test import test_function  # here I have imported test_function from the file test.py


## Shipping Costs Calculated Here:

# Note: from_coords and to_coords are tuples that contain the latitude and then the longitude; see nile.py.
def calculate_shipping_cost(from_coords, to_coords, shipping_type='Overnight'):  # shipping_type defaults to 'Overnight' because some customers forget to select a type; it is key information, so we need a sensible fallback here.
  #from_lat, from_long = from_coords # unpacking 
  #to_lat, to_long = to_coords # unpacking 
  distance = get_distance(*from_coords, *to_coords)  # same as get_distance(from_lat, from_long, to_lat, to_long), just more compact; it also means we don't need the manual unpacking in the two commented-out lines above.
  shipping_rate = SHIPPING_PRICES[shipping_type]
  price = distance * shipping_rate
  return format_price(price)

# Test the function by calling 
test_function(calculate_shipping_cost)
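# Quick illustration of the *-unpacking used above (illustrative only): prefixing
# a tuple with * at a call site spreads its items into separate positional
# arguments, so f(*from_coords, *to_coords) is the same call as
# f(from_lat, from_long, to_lat, to_long).
def _show_args(*args):
  return args

assert _show_args(*(51.5, -0.1), *(48.9, 2.4)) == (51.5, -0.1, 48.9, 2.4)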

# Define calculate_driver_cost() here
def calculate_driver_cost(distance, *drivers):
  cheapest_driver = None
  cheapest_driver_price = None
  for driver in drivers:
    driver_time = driver.speed * distance
    price_for_driver = driver.salary * driver_time
    if cheapest_driver is None: # if the cheapest driver has not yet been set. 
      cheapest_driver = driver
      cheapest_driver_price = price_for_driver
    elif price_for_driver < cheapest_driver_price:
      cheapest_driver = driver
      cheapest_driver_price = price_for_driver    
  return cheapest_driver_price, cheapest_driver
Example #5
def main():
    global epochs_since_improvement, start_epoch, best_loss, epoch, checkpoint

    # samples for the three class labels are balanced in ../code/original/fix.py
    train_data = Pneumonia(txt=balanced_txt,
                           mode='train',
                           class_to_idx=class_to_idx,
                           transforms=data_transforms['train'])
    train_data, valid_data, test_data = torch.utils.data.random_split(
        train_data, [7000, 2000, 3000])
    print('train_data_size: ', len(train_data))
    print('valid_data_size: ', len(valid_data))
    print('test_data_size: ', len(test_data))

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=batch_size,
                                               num_workers=0,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=batch_size,
                                              num_workers=0,
                                              shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=batch_size,
                                               num_workers=0,
                                               shuffle=True)

    # we start from a pretrained model and replace its classifier head; all layers stay trainable (full fine-tuning)
    model = models.densenet121(pretrained=True)
    #model = models.resnet50(pretrained=True)
    for param in model.parameters():
        param.requires_grad = True

    model.classifier = nn.Sequential(
        OrderedDict([
            ('fcl1', nn.Linear(1024, 256)),
            ('dp1', nn.Dropout(0.3)),
            ('r1', nn.ReLU()),
            ('fcl2', nn.Linear(256, 32)),
            ('dp2', nn.Dropout(0.3)),
            ('r2', nn.ReLU()),
            ('fcl3', nn.Linear(32, 3)),
            #('out', nn.Softmax(dim=1)),
        ]))
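    # (Aside, not in the original code: if only this new head were meant to be
    #  trained, the pretrained backbone could be frozen instead of setting
    #  requires_grad = True on every parameter above, e.g.
    #      for param in model.features.parameters():
    #          param.requires_grad = False
    #  As written, all layers stay trainable, i.e. full fine-tuning.)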

    train_on_gpu = torch.cuda.is_available()
    if train_on_gpu:
        print('GPU is available :)  Training on GPU ...')
    else:
        print('GPU is not available :(  Training on CPU ...')

    # after the first training run a checkpoint file exists and is loaded below;
    # on the very first run there is none yet, so fall back to a fresh optimizer
    try:
        checkpoint = torch.load('/home/tianshu/pneumonia/code/checkpoint.pth.tar',
                                map_location={'cuda:2': 'cuda:0'})
    except FileNotFoundError:
        checkpoint = None
    if checkpoint is None:
        optimizer = optim.Adadelta(model.parameters())
    else:
        # restore training state from the loaded checkpoint
        start_epoch = checkpoint['epoch'] + 1
        epochs_since_improvement = checkpoint['epoch_since_improvement']
        best_loss = checkpoint['best_loss']
        print(
            '\nLoaded checkpoint from epoch %d. Best loss so far is %.3f.\n' %
            (start_epoch, best_loss))
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']

    criterion = nn.CrossEntropyLoss()

    #train the model
    for epoch in range(start_epoch, epochs):
        val_loss = train_function(model,
                                  train_loader,
                                  valid_loader,
                                  criterion=criterion,
                                  optimizer=optimizer,
                                  train_on_gpu=train_on_gpu,
                                  epoch=epoch,
                                  device=device,
                                  scheduler=None)

        # Did validation loss improve?
        is_best = val_loss < best_loss
        best_loss = min(val_loss, best_loss)

        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n" %
                  (epochs_since_improvement, ))

        else:
            epochs_since_improvement = 0

        # Save checkpoint
        save_checkpoint(epoch, epochs_since_improvement, model, optimizer,
                        val_loss, best_loss, is_best)

    test_function(model, test_loader, device, criterion, cat_to_name)
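# The checkpoint format is implied by the keys read after torch.load() above and
# by the save_checkpoint(...) call in the training loop.  A minimal sketch of a
# matching save_checkpoint follows; it is an assumption, since the real helper
# lives elsewhere in this project and may store more.
import torch


def save_checkpoint(epoch, epochs_since_improvement, model, optimizer, val_loss,
                    best_loss, is_best):
    state = {'epoch': epoch,
             'epoch_since_improvement': epochs_since_improvement,
             'best_loss': best_loss,
             'val_loss': val_loss,
             'model': model,
             'optimizer': optimizer}
    torch.save(state, 'checkpoint.pth.tar')
    if is_best:
        # keep a separate copy of the best model seen so far
        torch.save(state, 'best_checkpoint.pth.tar')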
Example #6
import test

test.test_function(5)
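# Note: unlike the earlier examples, this imports the whole module, so the
# function is reached as test.test_function rather than test_function.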