Code example #1
def train_epochs(args, data_transform):
    training_dataset = DrivingDataset(root_dir=args.train_dir,
                                      categorical=True,
                                      classes=args.n_steering_classes,
                                      transform=data_transform)

    validation_dataset = DrivingDataset(root_dir=args.validation_dir,
                                        categorical=True,
                                        classes=args.n_steering_classes,
                                        transform=data_transform)

    training_iterator = DataLoader(training_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=2)
    validation_iterator = DataLoader(validation_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=2)
    driving_policy = DiscreteDrivingPolicy(
        n_classes=args.n_steering_classes).to(DEVICE)

    opt = torch.optim.Adam(driving_policy.parameters(), lr=args.lr)
    args.start_time = time.time()

    args.class_dist = get_class_distribution(training_iterator, args)
    best_val_accuracy = 0
    for epoch in range(args.n_epochs):
        print('EPOCH ', epoch)

        # Train for one epoch, then checkpoint whenever the validation
        # accuracy matches or beats the best seen so far.
        train_discrete(driving_policy, training_iterator, opt, args)
        acc = test_discrete(driving_policy, validation_iterator, opt, args)
        if acc >= best_val_accuracy:
            torch.save(driving_policy.state_dict(), args.weights_out_file)
            best_val_accuracy = acc
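All of these examples rely on the same module-level setup, which the snippets omit. A minimal sketch of the third-party imports and the DEVICE constant they assume follows; the project-specific helpers (DrivingDataset, DiscreteDrivingPolicy, train_discrete, test_discrete, get_class_distribution) come from the assignment codebase and are not reproduced here.

import time

import torch
from torch.utils.data import DataLoader
from torchvision import transforms

# Run on the GPU when one is available, otherwise fall back to the CPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')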
Code example #2
def main(args, driving_policy=None):
    # This variant never instantiates a policy itself; the caller is
    # expected to pass in an existing driving_policy.

    data_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
        transforms.RandomRotation(degrees=80),
        transforms.ToTensor()
    ])

    training_dataset = DrivingDataset(root_dir=args.train_dir,
                                      categorical=True,
                                      classes=args.n_steering_classes,
                                      transform=data_transform)

    validation_dataset = DrivingDataset(root_dir=args.validation_dir,
                                        categorical=True,
                                        classes=args.n_steering_classes,
                                        transform=data_transform)

    training_iterator = DataLoader(training_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
    validation_iterator = DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)


    opt = torch.optim.Adam(filter(lambda p: p.requires_grad, driving_policy.parameters()), lr=args.lr)
    args.start_time = time.time()


    args.class_dist = get_class_distribution(training_iterator, args)

    for epoch in range(args.n_epochs):
        print('EPOCH ', epoch)

        # No validation pass in this variant: the checkpoint is simply
        # overwritten after every training epoch.
        train_discrete(driving_policy, training_iterator, opt, args)
        torch.save(driving_policy.state_dict(), args.weights_out_file)

    return driving_policy
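Because this version builds its optimizer only over parameters with requires_grad=True, it suits fine-tuning a policy whose earlier layers have been frozen. A hedged sketch of such a caller follows; the checkpoint path and the layer-name convention used for freezing are assumptions, not part of the original code.

policy = DiscreteDrivingPolicy(n_classes=args.n_steering_classes).to(DEVICE)
policy.load_state_dict(torch.load(args.weights_out_file, map_location=DEVICE))

# Freeze everything except the final fully connected layers; the optimizer
# inside main() only ever sees parameters with requires_grad=True.
# ('fc' is a hypothetical naming convention for the classifier layers.)
for name, param in policy.named_parameters():
    param.requires_grad = 'fc' in name

policy = main(args, driving_policy=policy)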
Code example #3
def main(args):

    data_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ColorJitter(brightness=0.1,
                               contrast=0.1,
                               saturation=0.1,
                               hue=0.1),
        transforms.RandomRotation(degrees=80),
        transforms.ToTensor()
    ])

    training_dataset = DrivingDataset(root_dir=args.train_dir,
                                      categorical=True,
                                      classes=args.n_steering_classes,
                                      transform=data_transform)

    validation_dataset = DrivingDataset(root_dir=args.validation_dir,
                                        categorical=True,
                                        classes=args.n_steering_classes,
                                        transform=data_transform)

    training_iterator = DataLoader(training_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=1)
    validation_iterator = DataLoader(validation_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=1)
    driving_policy = DiscreteDrivingPolicy(
        n_classes=args.n_steering_classes).to(DEVICE)

    opt = torch.optim.Adam(driving_policy.parameters(), lr=args.lr)
    args.start_time = time.time()

    print(driving_policy)
    print(opt)
    print(args)

    args.class_dist = get_class_distribution(training_iterator, args)

    best_val_accuracy = 0
    for epoch in range(args.n_epochs):
        print('EPOCH ', epoch)

        # Train the driving policy for one epoch, evaluate it on the
        # validation set, and save the network weights whenever the
        # validation accuracy reaches a new high.
        train_discrete(driving_policy, training_iterator, opt, args)
        acc = test_discrete(driving_policy, validation_iterator, opt, args)
        if acc > best_val_accuracy:
            torch.save(driving_policy.state_dict(), args.weights_out_file)
            best_val_accuracy = acc
    return driving_policy
Code example #4
def main(args, driving_policy=None):
    train_loss = []
    test_acc = []

    data_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
        transforms.RandomRotation(degrees=80),
        transforms.ToTensor()
    ])

    training_dataset = DrivingDataset(root_dir=args.train_dir,
                                      categorical=True,
                                      classes=args.n_steering_classes,
                                      transform=data_transform)

    validation_dataset = DrivingDataset(root_dir=args.validation_dir,
                                        categorical=True,
                                        classes=args.n_steering_classes,
                                        transform=data_transform)

    training_iterator = DataLoader(training_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
    validation_iterator = DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
    if driving_policy is None:
        print("New policy instantiated.")
        # Architecture used for this run; the alternatives tried are left
        # commented out.
        # driving_policy = OriginalDrivingPolicy(n_classes=args.n_steering_classes).to(DEVICE)
        # driving_policy = RecurrentNetwork(n_classes=args.n_steering_classes).to(DEVICE)
        driving_policy = AttentionNetwork(n_classes=args.n_steering_classes).to(DEVICE)
        # driving_policy = RecurrentAttention(n_classes=args.n_steering_classes).to(DEVICE)
        args.freeze_last_n_layers = 0

    # Plain Adam (no weight decay) used previously:
    # opt = torch.optim.Adam(filter(lambda p: p.requires_grad, driving_policy.parameters()), lr=args.lr)
    # AdamW applies the weight decay in decoupled form, avoiding the known
    # problem with L2 regularization inside Adam.
    opt = torch.optim.AdamW(filter(lambda p: p.requires_grad, driving_policy.parameters()), lr=args.lr, weight_decay=0.0005)
    args.start_time = time.time()

    args.class_dist = get_class_distribution(training_iterator, args)

    best_val_accuracy = 0
    for epoch in range(args.n_epochs):
        print('EPOCH ', epoch)

        curr_train_loss = train_discrete(driving_policy, training_iterator, opt, args)
        train_loss.append(curr_train_loss)

        curr_val_accuracy = test_discrete(driving_policy, validation_iterator, opt, args)
        test_acc.append(curr_val_accuracy)

        # Checkpoint whenever the validation accuracy reaches a new high.
        if curr_val_accuracy > best_val_accuracy:
            best_val_accuracy = curr_val_accuracy
            torch.save(driving_policy.state_dict(), args.weights_out_file)

    return driving_policy, train_loss, test_acc
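Unlike the earlier variants, this main also returns the per-epoch training losses and validation accuracies. A minimal sketch of a caller that plots those curves with matplotlib follows; the plotting itself is an assumption for illustration, not part of the original project.

import matplotlib.pyplot as plt

policy, train_loss, test_acc = main(args)

fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))
ax_loss.plot(train_loss)
ax_loss.set_xlabel('epoch')
ax_loss.set_ylabel('training loss')
ax_acc.plot(test_acc)
ax_acc.set_xlabel('epoch')
ax_acc.set_ylabel('validation accuracy')
fig.tight_layout()
fig.savefig('learning_curves.png')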
Code example #5
File: dagger.py  Project: chaddy1004/csc2626w21
        default=False)

    args = parser.parse_args()

    data_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ColorJitter(brightness=0.1,
                               contrast=0.1,
                               saturation=0.1,
                               hue=0.1),
        transforms.RandomRotation(degrees=80),
        transforms.ToTensor()
    ])

    training_dataset = DrivingDataset(root_dir=args.train_dir,
                                      categorical=True,
                                      classes=args.n_steering_classes,
                                      transform=data_transform)

    validation_dataset = DrivingDataset(root_dir=args.validation_dir,
                                        categorical=True,
                                        classes=args.n_steering_classes,
                                        transform=data_transform)

    training_iterator = DataLoader(training_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=0)
    validation_iterator = DataLoader(validation_dataset,
                                     batch_size=args.batch_size,
                                     shuffle=True,
                                     num_workers=0)
def main(args, driving_policy=None):
    train_loss = []
    test_acc = []

    data_transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
        transforms.RandomRotation(degrees=80),
        transforms.ToTensor()
    ])

    training_dataset = DrivingDataset(root_dir=args.train_dir,
                                      categorical=True,
                                      classes=args.n_steering_classes,
                                      transform=data_transform)

    validation_dataset = DrivingDataset(root_dir=args.validation_dir,
                                        categorical=True,
                                        classes=args.n_steering_classes,
                                        transform=data_transform)

    training_iterator = DataLoader(training_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
    validation_iterator = DataLoader(validation_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
    if driving_policy is None:
        print("New policy instantiated.")
        # Select the architecture named by the --policy flag.
        if args.policy == 'OriginalDrivingPolicy':
            driving_policy = OriginalDrivingPolicy(n_classes=args.n_steering_classes).to(DEVICE)
        elif args.policy == 'RecurrentNetwork':
            driving_policy = RecurrentNetwork(n_classes=args.n_steering_classes).to(DEVICE)
        elif args.policy == 'AttentionNetwork':
            driving_policy = AttentionNetwork(n_classes=args.n_steering_classes).to(DEVICE)
        elif args.policy == 'RecurrentAttention':
            driving_policy = RecurrentAttention(n_classes=args.n_steering_classes).to(DEVICE)
        elif args.policy == 'OriginalDrivingPolicyDropOut':
            driving_policy = OriginalDrivingPolicyDropOut(n_classes=args.n_steering_classes).to(DEVICE)
        elif args.policy == 'RecurrentNetworkDropOut':
            driving_policy = RecurrentNetworkDropOut(n_classes=args.n_steering_classes).to(DEVICE)
        elif args.policy == 'AttentionNetworkDropOut':
            driving_policy = AttentionNetworkDropOut(n_classes=args.n_steering_classes).to(DEVICE)
        elif args.policy == 'RecurrentAttentionDropOut':
            driving_policy = RecurrentAttentionDropOut(n_classes=args.n_steering_classes).to(DEVICE)
        args.freeze_last_n_layers = 0

    if args.is_l2:
        # AdamW applies weight decay in decoupled form, which fixes the
        # interaction between L2 regularization and Adam's adaptive updates.
        opt = torch.optim.AdamW(filter(lambda p: p.requires_grad, driving_policy.parameters()), lr=args.lr, weight_decay=args.wd)
    else:
        opt = torch.optim.Adam(filter(lambda p: p.requires_grad, driving_policy.parameters()), lr=args.lr)
    args.start_time = time.time()

    args.class_dist = get_class_distribution(training_iterator, args)

    best_val_accuracy = 0
    for epoch in range(args.n_epochs):
        print('EPOCH ', epoch)

        curr_train_loss = train_discrete(driving_policy, training_iterator, opt, args)
        train_loss.append(curr_train_loss)

        curr_val_accuracy = test_discrete(driving_policy, validation_iterator, opt, args)
        test_acc.append(curr_val_accuracy)

        # Checkpoint whenever the validation accuracy reaches a new high.
        if curr_val_accuracy > best_val_accuracy:
            best_val_accuracy = curr_val_accuracy
            torch.save(driving_policy.state_dict(), args.weights_out_file)

    if args.output_log:
        if args.is_l2:
            suffix = '_weight_decay'
        elif args.is_dropout:
            suffix = '_drop_out'
        else:
            suffix = ''

        train_log = './logs/{}_Loss{}.txt'.format(args.policy, suffix)
        test_log = './logs/{}_Acc{}.txt'.format(args.policy, suffix)

        with open(train_log, 'w') as f_train:
            for loss in train_loss:
                f_train.write(str(loss) + '\n')
        with open(test_log, 'w') as f_test:
            for accuracy in test_acc:
                f_test.write(str(accuracy) + '\n')


    return driving_policy, train_loss, test_acc
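The parser definition for this last example is truncated above, so the command-line flags it expects are only visible through the args.* accesses in the code. A hedged sketch of a parser covering the fields this main reads follows; the flag names mirror those accesses, while the types and default values are assumptions.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_dir', type=str, required=True)
    parser.add_argument('--validation_dir', type=str, required=True)
    parser.add_argument('--weights_out_file', type=str, required=True)
    parser.add_argument('--n_steering_classes', type=int, default=20)
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--n_epochs', type=int, default=50)
    parser.add_argument('--policy', type=str, default='OriginalDrivingPolicy')
    parser.add_argument('--is_l2', action='store_true')
    parser.add_argument('--is_dropout', action='store_true')
    parser.add_argument('--wd', type=float, default=0.0005)
    parser.add_argument('--output_log', action='store_true')
    args = parser.parse_args()

    main(args)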