Example #1
import numpy as np
import PIL.Image
from skimage import io, transform
from skimage.color import rgb2gray


def image_def(image: str, model=None):
    """
    Classifies the image and returns it as a Clothing object so it can be used for matching.
    :param image: path of the image file
    :param model: the classification model; if none is provided, one is built inside the function
    :return: the source file classified as a clothing item
    """

    class_names = [
        'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',
        'Shirt', 'Sneaker', 'Bag', 'Ankle boot'
    ]

    if model is None:
        model = build_model()

    # Convert to grayscale and resize to the 1x28x28 batch shape the model expects
    gray = rgb2gray(io.imread(image))
    gray = transform.resize(gray, (1, 28, 28), mode='symmetric')

    predictions = model.predict(gray)
    predicted_index = np.argmax(predictions)

    clothing_type = class_names[predicted_index]

    if isinstance(image, str):
        _image = PIL.Image.open(image)
    else:
        _image = image

    pattern = detect_pattern(_image)
    color = detect_clothing_color(_image)
    return Clothing(clothing_type, pattern, color)
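The snippet calls build_model() without defining it. A minimal stand-in sketch, assuming a small Keras classifier trained on Fashion-MNIST (the dataset matching the class_names above); the real project may build or load its model differently:

from tensorflow import keras

def build_model():
    # Fashion-MNIST: 28x28 grayscale images, 10 clothing classes
    (x_train, y_train), _ = keras.datasets.fashion_mnist.load_data()
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(10, activation='softmax'),
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train / 255.0, y_train, epochs=1, verbose=0)
    return model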
Example #2
import unittest

# Module paths assumed; the classes under test are defined in the project
from clothing import Clothing
from blouse import Blouse
from pants import Pants


class TestClothingClass(unittest.TestCase):
    def setUp(self):
        self.clothing = Clothing('orange', 'M', 'stripes', 35)
        self.blouse = Blouse('blue', 'M', 'luxury', 40, 'Brazil')
        self.pants = Pants('black', 32, 'baggy', 60, 30)

    def test_initialization(self):
        self.assertEqual(self.clothing.color, 'orange',
                         'color should be orange')
        self.assertEqual(self.clothing.price, 35, 'incorrect price')

        self.assertEqual(self.blouse.color, 'blue', 'color should be blue')
        self.assertEqual(self.blouse.size, 'M', 'incorrect size')
        self.assertEqual(self.blouse.style, 'luxury', 'incorrect style')
        self.assertEqual(self.blouse.price, 40, 'incorrect price')
        self.assertEqual(self.blouse.country_of_origin, 'Brazil',
                         'incorrect country of origin')

    def test_calculate_shipping(self):
        self.assertEqual(self.clothing.calculate_shipping(.5, 3), .5 * 3,
                         'Clothing shipping calculation not as expected')

        self.assertEqual(self.blouse.calculate_shipping(.5, 3), .5 * 3,
                         'Blouse shipping calculation not as expected')
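The Clothing class under test is not shown. A minimal sketch consistent with the assertions above (assumed; the shipping formula weight * rate is inferred from the expected value .5 * 3):

class Clothing:
    def __init__(self, color, size, style, price):
        self.color = color
        self.size = size
        self.style = style
        self.price = price

    def calculate_shipping(self, weight, rate):
        # Inferred from the test: shipping cost is weight times rate
        return weight * rate

The Blouse and Pants subclasses it relies on appear in Examples #13 and #14.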
Example #3
File: utils.py Project: cs15b047/DDP
def get_datasets(dataset_name, noise_type=None, noise_rate=None):
    """Return (train, val, test) datasets; val_dataset is only populated for Clothes1M and Food101N."""
    val_dataset = None
    if dataset_name == "MNIST":
        train_dataset = MNIST(root='./data/',
                              download=True,
                              train=True,
                              transform=transforms.ToTensor(),
                              noise_type=noise_type,
                              noise_rate=noise_rate)
        test_dataset = MNIST(root='./data/',
                             download=True,
                             train=False,
                             transform=transforms.ToTensor(),
                             noise_type=noise_type,
                             noise_rate=noise_rate)
    elif dataset_name == "CIFAR10":
        train_dataset = CIFAR10(root='./data/',
                                download=True,
                                train=True,
                                transform=get_transform(),
                                noise_type=noise_type,
                                noise_rate=noise_rate)
        test_dataset = CIFAR10(root='./data/',
                               download=True,
                               train=False,
                               transform=transforms.ToTensor(),
                               noise_type=noise_type,
                               noise_rate=noise_rate)
    elif dataset_name == "CIFAR100":
        train_dataset = CIFAR100(root='./data/',
                                 download=True,
                                 train=True,
                                 transform=get_transform(),
                                 noise_type=noise_type,
                                 noise_rate=noise_rate)
        test_dataset = CIFAR100(root='./data/',
                                download=True,
                                train=False,
                                transform=transforms.ToTensor(),
                                noise_type=noise_type,
                                noise_rate=noise_rate)
    elif dataset_name == "Clothes1M":
        img_sz = 224
        train_dataset = Clothing(data_dir='../clothing1M/',
                                 train=True,
                                 val=False,
                                 test=False,
                                 transform=clothing_transform(img_sz))
        val_dataset = Clothing(data_dir='../clothing1M/',
                               train=False,
                               val=True,
                               test=False,
                               transform=clothing_transform(img_sz))
        test_dataset = Clothing(data_dir='../clothing1M/',
                                train=False,
                                val=False,
                                test=True,
                                transform=clothing_transform(img_sz))
    elif dataset_name == "Food101N":
        img_sz = 224
        train_dataset = Food101N(data_dir='../Food-101N_release/',
                                 train=True,
                                 val=False,
                                 test=False,
                                 transform=food_transform(img_sz))
        val_dataset = Food101N(data_dir='../Food-101N_release/',
                               train=False,
                               val=True,
                               test=False,
                               transform=food_transform(img_sz))
        test_dataset = Food101N(data_dir='../Food-101N_release/',
                                train=False,
                                val=False,
                                test=True,
                                transform=food_transform(img_sz))

    else:
        raise ValueError(f"Unsupported dataset: {dataset_name}")

    return train_dataset, val_dataset, test_dataset
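A possible call site for get_datasets (hedged: batch size and noise settings are illustrative, and the project's custom noisy-label dataset classes must be importable):

from torch.utils.data import DataLoader

train_ds, val_ds, test_ds = get_datasets("CIFAR10",
                                         noise_type="symmetric",
                                         noise_rate=0.2)
train_loader = DataLoader(train_ds, batch_size=128, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=128, shuffle=False)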
Example #4
from category import Category
from clothing import Clothing
from equipment import Equipment
cats = {
    "legs":
    Category("False Legs", [
        Equipment("Big Bat", 500, "metal", "9000"),
        Clothing("Socks", 10, "Red", 500),
        Clothing("Fluffy Socks", 100, "Green", 50),
        Clothing("Hairy Shoes", 120, "Orange", 5)
    ]),
    "bats":
    Category("Baseball Bats", [Equipment("Big Bat", 500, "metal", "9000")]),
    "fruit":
    Category("Fruit", []),
    "special":
    Category("Bobs Special Place",
             [Clothing("Metal Jumper", 200, "Yellow", 120)])
}
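The Category class imported above is not shown. A minimal definition consistent with its usage here and in Examples #5 and #6 (assumed; the products argument defaults to empty so Category("Basketball") also works):

class Category:
    def __init__(self, name, products=None):
        self.name = name
        self.products = products if products is not None else []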
Example #5
class Store:
    def __init__(self, name, categories):  # header reconstructed; the snippet starts mid-__init__
        self.name = name
        self.categories = categories

    def __repr__(self):
        output = ''
        output += self.name + '\n'
        i = 1
        for c in self.categories:
            output += str(i) + '. ' + c.name + '\n'
            i += 1
        output += str(i) + '. Exit'
        return output


my_store = Store("The Dugout", [
    Category("Running", [
        Clothing('Shorts', 19.99, 'red', 12),
        Clothing('Socks', 8.99, 'white', 10)
    ]),
    Category("Baseball", [
        Equipment('baseball bat', 299.99, 'unisex', 2.5),
        Equipment('baseball', 11.99, 'kids', 0.5)
    ]),
    Category("Basketball")
])

print(my_store)

selection = 0
while selection != len(my_store.categories) + 1:
    selection = input("Select the number of a department ")
    try:
        # Cast inside the try so non-numeric input doesn't crash the loop
        selection = int(selection)
    except ValueError:
        print("Please enter a number.")
Example #6
class Store:
    def __init__(self, name, categories):
        self.name = name
        self.categories = categories

    def __str__(self):
        output = f"{self.name}\n" + "Store categories include:\n"
        for i, c in enumerate(self.categories):
            output += " " + str(i + 1) + ". " + str(c) + "\n"
        # Compute the exit option from the category count instead of hardcoding 5
        return output + f" {len(self.categories) + 1}. Exit"


my_store = Store("Leslies's Athletics", [
    Category("running", [
        Clothing("shoe", 100.00, "black", 10),
        Clothing("shorts", 50.00, "blue", "medium"),
        Clothing("shirt", 20.00, "white", "medium")
    ]),
    Category("tennis"),
    Category("basketball")
])

print(my_store)

selection = 0
while selection != len(my_store.categories) + 1:
    selection = input("\n<< Select the number of a category >> ")
    print("\n  The user selected ** " + str(selection) + " ** \n")
    try:
        # move casting to int into the try block
        selection = int(selection)
    except ValueError:
        print("Please enter a valid number.")
Example #7
File: human.py Project: WT000/COM411
  def __init__(self, name, age):
    # The parent constructor sets the name and age supplied by the user
    super().__init__(name, age)

    self.clothing = []

  # Subclass methods which only humans can do
  def display(self):
    print("Hello! My name is {} and I'm a human!".format(self.name))

  def speak(self):
    print("I am human!")

  def dress(self, clothes):
    self.clothing.append(clothes)

  def undress(self, clothes):
    self.clothing.remove(clothes)

  # Override the __repr__ magic method; the default __str__ is fine

  def __repr__(self):
    return "human=(name={}, age={}, energy={}, clothing={})".format(self.name, self.age, self.energy, self.clothing)

if __name__ == "__main__":
  test = Human("John", 74)

  tshirt = Clothing("Purple", "Silk", ClothingSize.LARGE)
  test.dress(tshirt)
  print(repr(test))
  test.undress(tshirt)
  print(repr(test))
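ClothingSize is used but never defined in the snippet; a plausible definition (assumed) is a simple Enum. Note that this project's Clothing constructor takes (color, material, size), unlike the (color, size, style, price) signature in the other examples:

from enum import Enum

class ClothingSize(Enum):
    SMALL = "S"
    MEDIUM = "M"
    LARGE = "L"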
Example #8
from category import Category
from clothing import Clothing

# my categories
fiction = Category("Fiction", [
    Clothing("Some Product", 30.78, "red", 20),
    Clothing("Other Product", 40, "Orange", 25)
])
non_fiction = Category("Non Fiction", [])
golf_balls = Category("Golf Balls", [])
other = Category("Some other Cat", [])


# store class that has a name and a categories
class Store:
    def __init__(self, name, categories):
        self.name = name
        self.categories = categories

    def __str__(self):
        output = f"{self.name}\n"
        for i, c in enumerate(self.categories):
            output += "   " + str(i + 1) + ": " + c.name + "\n"

        # Use len() so this also works when there are no categories (the
        # original reused the loop variable i, a NameError for an empty list)
        output += "   " + str(len(self.categories) + 1) + ": Exit"
        return output


s = Store("Books n thingz", [fiction, non_fiction, golf_balls, other])

# shop_open = True
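With the __str__ above, printing the store renders the menu (output shown for the categories defined here):

print(s)
# Books n thingz
#    1: Fiction
#    2: Non Fiction
#    3: Golf Balls
#    4: Some other Cat
#    5: Exit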
Example #9
from category import Category
from clothing import Clothing
from equipment import Equipment

cats = {
    "legs": Category("False Legs", [Clothing("Hat", 23, "Red", 5), Clothing("Shirt", 23, "Green", 5)]),
    "bats": Category("Baseball Bats", [Equipment("Long Bat", 450, "Metal", 10000)]),
    "fruit": Category("Fruit", []),
    "special": Category("Bobs Special Place", [])
}

Example #10
File: shirt.py Project: ak799/AWS-Udacity
class Shirt(Clothing):  # ASSUMPTION: class header reconstructed; only the constructor appears in the snippet
    def __init__(self, color, size, style, price, long_or_short):
        Clothing.__init__(self, color, size, style, price)
        self.long_or_short = long_or_short
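Chaining through Clothing.__init__ works, but super() is the more idiomatic spelling of the same constructor call (equivalent sketch, not from the original project):

class Shirt(Clothing):
    def __init__(self, color, size, style, price, long_or_short):
        super().__init__(color, size, style, price)
        self.long_or_short = long_or_short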
Example #11
def main():
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')

    parser.add_argument('--result_dir',
                        type=str,
                        help='dir to save result txt files',
                        default='results/')
    parser.add_argument('--noise_rate',
                        type=float,
                        help='corruption rate, should be less than 1',
                        default=0.5)
    parser.add_argument('--forget_rate',
                        type=float,
                        help='forget rate',
                        default=None)
    parser.add_argument('--noise_type',
                        type=str,
                        help='[pairflip, symmetric]',
                        default='symmetric')
    parser.add_argument(
        '--num_gradual',
        type=int,
        default=10,
        help=
        'how many epochs for linear drop rate, can be 5, 10, 15. This parameter is equal to Tk for R(T) in Co-teaching paper.'
    )
    parser.add_argument(
        '--exponent',
        type=float,
        default=1,
        help=
        'exponent of the forget rate, can be 0.5, 1, 2. This parameter is equal to c in Tc for R(T) in Co-teaching paper.'
    )
    parser.add_argument('--top_bn', action='store_true')
    parser.add_argument('--dataset',
                        type=str,
                        help='cifar10, cifar100, imagenet_tiny, or clothing1M',
                        default='cifar10')
    parser.add_argument('--n_epoch', type=int, default=300)
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--print_freq', type=int, default=50)
    parser.add_argument('--num_workers',
                        type=int,
                        default=2,
                        help='how many subprocesses to use for data loading')
    parser.add_argument('--num_iter_per_epoch', type=int, default=400)
    parser.add_argument('--epoch_decay_start', type=int, default=80)
    parser.add_argument('--eps', type=float, default=9.9)

    parser.add_argument('--batch-size',
                        type=int,
                        default=128,
                        metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=4000,
                        metavar='N',
                        help='input batch size for testing (default: 4000)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.2,
                        metavar='LR',
                        help='learning rate (default: 0.2)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=100,
        metavar='N',
        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For Saving the current Model')
    parser.add_argument(
        '--noise-level',
        type=float,
        default=80.0,
        help=
        'percentage of noise added to the data (values from 0. to 100.), default: 80.'
    )
    parser.add_argument(
        '--root-dir',
        type=str,
        default='.',
        help=
        'path to CIFAR dir where cifar-10-batches-py/ and cifar-100-python/ are located. If the datasets are not downloaded, they will automatically be and extracted to this path, default: .'
    )
    args = parser.parse_args()

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    batch_size = args.batch_size

    # if args.dataset=='mnist':
    #     input_channel=1
    #     num_classes=10
    #     args.top_bn = False
    #     args.epoch_decay_start = 80
    #     args.n_epoch = 200
    #     train_dataset = MNIST(root='./data/',
    #                                 download=True,
    #                                 train=True,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                          )
    #
    #     test_dataset = MNIST(root='./data/',
    #                                download=True,
    #                                train=False,
    #                                transform=transforms.ToTensor(),
    #                                noise_type=args.noise_type,
    #                                noise_rate=args.noise_rate
    #                         )
    #
    # if args.dataset=='cifar10':
    #     input_channel=3
    #     num_classes=10
    #     args.top_bn = False
    #     args.epoch_decay_start = 80
    #     args.n_epoch = 200
    #     train_dataset = CIFAR10(root='./data/',
    #                                 download=True,
    #                                 train=True,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                            )
    #
    #     test_dataset = CIFAR10(root='./data/',
    #                                 download=True,
    #                                 train=False,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                           )
    #
    # if args.dataset=='cifar100':
    #     input_channel=3
    #     num_classes=100
    #     args.top_bn = False
    #     args.epoch_decay_start = 100
    #     args.n_epoch = 200
    #     train_dataset = CIFAR100(root='./data/',
    #                                 download=True,
    #                                 train=True,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                             )
    #
    #     test_dataset = CIFAR100(root='./data/',
    #                                 download=True,
    #                                 train=False,
    #                                 transform=transforms.ToTensor(),
    #                                 noise_type=args.noise_type,
    #                                 noise_rate=args.noise_rate
    #                             )
    # if args.forget_rate is None:
    #     forget_rate=args.noise_rate
    # else:
    #     forget_rate=args.forget_rate
    #
    # noise_or_not = train_dataset.noise_or_not
    # # Data Loader (Input Pipeline)
    # print('loading dataset...')
    # train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
    #                                            batch_size=batch_size,
    #                                            num_workers=args.num_workers,
    #                                            drop_last=True,
    #                                            shuffle=True)
    #
    # test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
    #                                           batch_size=batch_size,
    #                                           num_workers=args.num_workers,
    #                                           drop_last=True,
    #                                           shuffle=False)
    '''
    mean = [0.4914, 0.4822, 0.4465]
    std = [0.2023, 0.1994, 0.2010]

    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])'''
    if args.dataset == 'cifar10':
        mean = [0.4914, 0.4822, 0.4465]
        std = [0.2023, 0.1994, 0.2010]

        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        trainset = datasets.CIFAR10(root=args.root_dir,
                                    train=True,
                                    download=True,
                                    transform=transform_train)
        trainset_track = datasets.CIFAR10(root=args.root_dir,
                                          train=True,
                                          transform=transform_train)
        testset = datasets.CIFAR10(root=args.root_dir,
                                   train=False,
                                   transform=transform_test)
        num_classes = 10
    elif args.dataset == 'cifar100':
        mean = [0.4914, 0.4822, 0.4465]  # NOTE: CIFAR-10 statistics reused for CIFAR-100
        std = [0.2023, 0.1994, 0.2010]

        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])

        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
        trainset = datasets.CIFAR100(root=args.root_dir,
                                     train=True,
                                     download=True,
                                     transform=transform_train)
        trainset_track = datasets.CIFAR100(root=args.root_dir,
                                           train=True,
                                           transform=transform_train)
        testset = datasets.CIFAR100(root=args.root_dir,
                                    train=False,
                                    transform=transform_test)
        num_classes = 100
    elif args.dataset == 'imagenet_tiny':
        init_epoch = 100
        num_classes = 200
        #data_root = '/home/xingyu/Data/phd/data/imagenet-tiny/tiny-imagenet-200'
        data_root = '/home/iedl/w00536717/coteaching_plus-master/data/imagenet-tiny/tiny-imagenet-200'
        train_kv = "train_noisy_%s_%s_kv_list.txt" % (args.noise_type,
                                                      args.noise_rate)
        test_kv = "val_kv_list.txt"

        normalize = transforms.Normalize(mean=[0.4802, 0.4481, 0.3975],
                                         std=[0.2302, 0.2265, 0.2262])

        trainset = ImageFilelist(root=data_root,
                                 flist=os.path.join(data_root, train_kv),
                                 transform=transforms.Compose([
                                     transforms.RandomResizedCrop(56),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.ToTensor(),
                                     normalize,
                                 ]))
        trainset_track = ImageFilelist(root=data_root,
                                       flist=os.path.join(data_root, train_kv),
                                       transform=transforms.Compose([
                                           transforms.RandomResizedCrop(56),
                                           transforms.RandomHorizontalFlip(),
                                           transforms.ToTensor(),
                                           normalize,
                                       ]))

        testset = ImageFilelist(root=data_root,
                                flist=os.path.join(data_root, test_kv),
                                transform=transforms.Compose([
                                    transforms.Resize(64),
                                    transforms.CenterCrop(56),
                                    transforms.ToTensor(),
                                    normalize,
                                ]))
    elif args.dataset == 'clothing1M':
        init_epoch = 100
        num_classes = 14
        data_root = '/home/iedl/w00536717/data/cloting1m/'
        #train_kv =
        #test_kv =

        train_transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        test_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ])
        '''
        train_transform = transforms.Compose([
                transforms.Resize(256),
                transforms.RandomCrop(224),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),                
                transforms.Normalize((0.6959, 0.6537, 0.6371),(0.3113, 0.3192, 0.3214)),                     
            ]) 
        test_transform = transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize((0.6959, 0.6537, 0.6371),(0.3113, 0.3192, 0.3214)),
            ]) 
        '''
    ''' 
    train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4,
                                               pin_memory=True)
    train_loader_track = torch.utils.data.DataLoader(trainset_track, batch_size=args.batch_size, shuffle=False,
                                                     num_workers=4, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, num_workers=4,
                                              pin_memory=True)
    '''
    # NOTE: the loaders below assume args.dataset == 'clothing1M'; data_root
    # and train_transform are only defined in that branch above.
    train_dataset = Clothing(root=data_root,
                             img_transform=train_transform,
                             train=True,
                             valid=False,
                             test=False)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=32)
    train_dataset_track = Clothing(root=data_root,
                                   img_transform=train_transform,
                                   train=True,
                                   valid=False,
                                   test=False)
    train_loader_track = torch.utils.data.DataLoader(
        dataset=train_dataset_track,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=32)
    valid_dataset = Clothing(root=data_root,
                             img_transform=train_transform,
                             train=False,
                             valid=True,
                             test=False)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=32)
    test_dataset = Clothing(root=data_root,
                            img_transform=test_transform,
                            train=False,
                            valid=False,
                            test=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=32)
    #print(dir(train_loader))
    #labels = get_data_cifar_2(train_loader_track)  # it should be "cloning"
    #noisy_labels = add_noise_cifar_wo(train_loader, args.noise_level,
    #                                 args.noise_type)  # it changes the labels in the train loader directly
    #noisy_labels_track = add_noise_cifar_wo(train_loader_track, args.noise_level, args.noise_type)

    # Define models
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7"
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    #cnn = PreResNet_two.ResNet18(num_classes=num_classes).to(device)
    cnn = MyResNet_zero.MyCustomResnet(num_classes)  #.to(device)
    cnn = nn.DataParallel(cnn, device_ids=[0, 1, 2, 3, 4, 5, 6, 7])
    cnn.to(device)
    #print(model.parameters)
    #optimizer1 = torch.optim.SGD(cnn1.parameters(), lr=learning_rate)
    optimizer = torch.optim.SGD(cnn.parameters(),
                                lr=args.lr,
                                weight_decay=1e-3,
                                momentum=args.momentum)
    #optimizer = torch.optim.Adam(cnn.parameters(), lr=args.lr,weight_decay=1e-4)
    #optimizer1 = torch.optim.SGD(cnn.parameters(), lr=1e-2,weight_decay=1e-4,momentum=args.momentum)
    bmm_model = bmm_model_maxLoss = bmm_model_minLoss = 0

    acc = []
    loss = []
    loss_pure = []
    loss_corrupt = []
    out = []
    temp = 1
    for epoch in range(1, args.n_epoch + 1):
        if epoch < 3:
            #epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
            #    track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
            #                        bmm_model_minLoss)
            #l1,temp=train(args, cnn, device, train_loader, optimizer, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss,temp)
            l1 = train_CE(args, cnn, device, train_loader, optimizer, epoch)
            #adjust_learning_rate(optimizer, 0.1)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l1=train_true_label(args, cnn, device, train_loader, optimizer, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l1=train_CE(args, cnn, device, train_loader, optimizer, epoch)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #loss.append(l1)
            acc.append(test(args, cnn, device, test_loader))
        elif epoch < 80:
            if epoch == 3:
                adjust_learning_rate(optimizer, args.lr / 10)
            epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
                track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
                                    bmm_model_minLoss)
            l1, temp = train(args, cnn, device, train_loader, optimizer, epoch,
                             bmm_model, bmm_model_maxLoss, bmm_model_minLoss,
                             temp, num_classes)
            #l1=train_CE(args, cnn, device, train_loader, optimizer, epoch)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            acc.append(test(args, cnn, device, test_loader))
        elif epoch < 200:
            if epoch == 10:  # NOTE: unreachable here, since this branch only runs for epoch >= 80
                adjust_learning_rate(optimizer, args.lr / 1000)
            elif epoch == 100:
                adjust_learning_rate(optimizer, args.lr / 5000)
            elif epoch == 160:
                adjust_learning_rate(optimizer, args.lr / 25000)
            epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
                track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
                                    bmm_model_minLoss)

            l1, temp = train(args, cnn, device, train_loader, optimizer, epoch,
                             bmm_model, bmm_model_maxLoss, bmm_model_minLoss,
                             temp, num_classes)
            #adjust_learning_rate(optimizer, args.lr/1000)
            #epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
            #    track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
            #                        bmm_model_minLoss)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #acc.append(test(args, cnn, device, test_loader))
            #l1=train_true_label(args, cnn, device, train_loader, optimizer, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #l3=train_DMI(args, cnn, device, train_loader, optimizer1, epoch, bmm_model, bmm_model_maxLoss, bmm_model_minLoss, criterion = DMI_loss)
            #l1 = train_together(args, cnn, device, train_loader, optimizer, epoch, bmm_model, bmm_model_maxLoss,
            #                      bmm_model_minLoss)
            #loss.append(l1)
            #out.append(out10)
            acc.append(test(args, cnn, device, test_loader))
        else:
            #adjust_learning_rate(optimizer, args.lr/10000)
            epoch_losses_train, epoch_probs_train, argmaxXentropy_train, bmm_model, bmm_model_maxLoss, bmm_model_minLoss = \
                track_training_loss(args, cnn, device, train_loader_track, epoch, bmm_model, bmm_model_maxLoss,
                                    bmm_model_minLoss)
            l1, temp = train(args, cnn, device, train_loader, optimizer, epoch,
                             bmm_model, bmm_model_maxLoss, bmm_model_minLoss,
                             temp, num_classes)
            #l2=train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss)
            #acc.append(test(args, cnn, device, test_loader))
            #l1 = train_true_label(args, cnn, device, train_loader, optimizer, epoch, bmm_model, bmm_model_maxLoss,bmm_model_minLoss)
            #l2 = train_uncertainty(args, cnn, device, train_loader, optimizer1, epoch, bmm_model, bmm_model_maxLoss,bmm_model_minLoss)
            #l3=train_DMI(args, cnn, device, train_loader, optimizer1, epoch,bmm_model, bmm_model_maxLoss, bmm_model_minLoss, criterion = DMI_loss)
            #l1 = train_together(args, cnn, device, train_loader, optimizer, epoch, bmm_model, bmm_model_maxLoss,
            #                    bmm_model_minLoss)
            #loss.append(l1)
            #out.append(out10)
            acc.append(test(args, cnn, device, test_loader))
        print("Evaluate on validset")
        test(args, cnn, device, valid_loader)
        temp -= 0.0024

    name = str(args.dataset) + " " + str(args.noise_type) + " " + str(
        args.noise_rate)
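adjust_learning_rate is called throughout main() but not defined in the snippet. A common implementation (assumed) overwrites the learning rate of every parameter group:

def adjust_learning_rate(optimizer, lr):
    # Set a new learning rate on all of the optimizer's parameter groups
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr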
Example #12
    def setUp(self):
        self.clothing = Clothing('orange', 'M', 'stripes', 35)
        self.blouse = Blouse('blue', 'M', 'luxury', 40, 'Brazil')
        self.pants = Pants('black', 32, 'baggy', 60, 30)
Example #13
class Blouse(Clothing):  # ASSUMPTION: class name inferred from the Blouse usage in Example #2
    def __init__(self, color, size, style, price, country_of_origin):
        Clothing.__init__(self, color, size, style, price)
        self.country_of_origin = country_of_origin
Example #14
class Pants(Clothing):  # ASSUMPTION: class name inferred from the Pants usage in Example #2
    def __init__(self, color, size, style, price, waist):
        Clothing.__init__(self, color, size, style, price)
        self.waist = waist
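A quick usage check of the two subclasses, with the values from the setUp in Example #2:

blouse = Blouse('blue', 'M', 'luxury', 40, 'Brazil')
pants = Pants('black', 32, 'baggy', 60, 30)
print(blouse.country_of_origin)  # Brazil
print(pants.waist)               # 30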