Example #1
 def test_attribute_types(self):
     """
     tests the class attributes exist and are of correct type
     also tests instantiation of new object
     """
     # creates new instance of Identity
     new_obj = Identity()
     # adds name attribute (inherited requirement from BaseModel)
     # adds optional attributes for testing
     # (id should be set by primary key)
     # (created_at, updated_at should be set by datetime)
     new_obj.name = "test_name"
     # attributes_dict sets up dictionary of attribute names and types
     attributes_dict = {
         "id": str,
         "created_at": datetime,
         "updated_at": datetime,
         "name": str,
     }
     # loops through attributes_dict as subTests to check each attribute
     for attr, attr_type in attributes_dict.items():
         with self.subTest(attr=attr, attr_type=attr_type):
             # tests the expected attribute is in the instance's dict
             self.assertIn(attr, new_obj.__dict__)
             # tests the attribute is the expected type
             self.assertIs(type(new_obj.__dict__[attr]), attr_type)
Example #2
 def test_save_method(self, mock_storage):
     """
     tests that the save() method inherited from BaseModel calls on
     storage.new() to add and commit the object to the database
     """
     # creates new instance of Identity
     new_obj = Identity()
     # adds name as required attribute for database
     # (id should be set by primary key)
     # (created_at, updated_at should be set by datetime)
     new_obj.name = "test_name"
     # saves new instance and tests if storage method new is called
     new_obj.save()
     self.assertTrue(mock_storage.new.called)
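The mock_storage argument in the signature above implies the test method is wrapped with unittest.mock.patch so that the real storage engine is never touched. A minimal sketch of that decoration, assuming the engine is importable as models.storage (the patch target and import path are guesses, not taken from the source):

import unittest
from unittest import mock

from models.identity import Identity   # assumed import path

class TestIdentitySave(unittest.TestCase):
    # 'models.storage' is a hypothetical patch target used for illustration
    @mock.patch('models.storage')
    def test_save_method(self, mock_storage):
        new_obj = Identity()
        new_obj.name = "test_name"
        new_obj.save()                  # should delegate to storage.new()
        self.assertTrue(mock_storage.new.called)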
Example #3
    def test_init_method(self):
        """
        tests the __init__ method for instantiating new objects
        both new and from kwargs
        __init__ method calls on inherited BaseModel with super()
        """
        # creates new instance of Identity
        new_obj1 = Identity()
        # tests that the new object is of type Identity
        self.assertIs(type(new_obj1), Identity)
        # adds all attributes for testing
        # (id should be set by primary key)
        # (created_at, updated_at should be set by datetime)
        new_obj1.name = "test_name"
        # attributes_dict sets up dictionary of attribute names and types
        attributes_dict = {
            "id": str,
            "created_at": datetime,
            "updated_at": datetime,
            "name": str,
        }
        # loops through attributes_dict as subTests to check each attribute
        for attr, attr_type in attributes_dict.items():
            with self.subTest(attr=attr, attr_type=attr_type):
                # tests the expected attribute is in the object's dict
                self.assertIn(attr, new_obj1.__dict__)
                # tests the attribute is the expected type
                self.assertIs(type(new_obj1.__dict__[attr]), attr_type)
        # sets kwargs using object's dict and uses to create new object
        kwargs = new_obj1.__dict__
        new_obj2 = Identity(**kwargs)
        # tests that the new object is of type Identity
        self.assertIs(type(new_obj2), Identity)
        # loops through attributes_dict as subTests to check each attribute
        for attr, attr_type in attributes_dict.items():
            with self.subTest(attr=attr, attr_type=attr_type):
                # tests the expected attribute is in the object's dict
                self.assertIn(attr, new_obj2.__dict__)
                # tests the attribute is the expected type
                self.assertIs(type(new_obj2.__dict__[attr]), attr_type)
                # tests the attribute's value matches the original object
                self.assertEqual(new_obj1.__dict__[attr],
                                 new_obj2.__dict__[attr])

        # tests that __class__ is not set in object 2
        self.assertNotIn('__class__', new_obj2.__dict__)
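For context, a minimal sketch of the kind of __init__ the test above exercises: keyword arguments re-create an object, __class__ is never copied onto the instance, and a fresh object receives its id and timestamps. This is an assumption about BaseModel, not code from the project:

import uuid
from datetime import datetime

class BaseModel:
    """Hypothetical base class sketched from the behaviour the tests assert."""

    def __init__(self, *args, **kwargs):
        if kwargs:
            # rebuild the object from a __dict__-style mapping,
            # skipping the __class__ key if it is present
            for key, value in kwargs.items():
                if key != '__class__':
                    setattr(self, key, value)
        else:
            # brand-new object: generate id and timestamps
            self.id = str(uuid.uuid4())
            self.created_at = datetime.now()
            self.updated_at = self.created_at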
Example #4
def create_identity():
    """
    create a new instance of Identity
    through POST request
    """
    # get JSON from POST request
    json = request.get_json(silent=True)
    # checks that the body is valid JSON and has the required name attribute
    if json is None:
        abort(400, 'Not a JSON')
    if 'name' not in json:
        abort(400, 'Missing name attribute')
    # create new instance with **kwargs from json
    new_obj = Identity(**json)
    new_obj.save()
    # return JSON representation of the object's to_dict() with status 201
    return jsonify(new_obj.to_dict()), 201
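A quick way to exercise a handler like this is Flask's test client. The snippet below is a self-contained toy: the URL is a placeholder and Identity(**json).save() is replaced by echoing the payload, so only the request/validation flow matches the handler above:

from flask import Flask, abort, jsonify, request

app = Flask(__name__)

@app.route('/identities', methods=['POST'])     # hypothetical URL
def create_identity():
    json = request.get_json(silent=True)
    if json is None:
        abort(400, 'Not a JSON')
    if 'name' not in json:
        abort(400, 'Missing name attribute')
    # the real handler would do: new_obj = Identity(**json); new_obj.save()
    return jsonify(dict(json)), 201

with app.test_client() as client:
    resp = client.post('/identities', json={'name': 'test_name'})
    print(resp.status_code, resp.get_json())     # 201 {'name': 'test_name'}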
Example #5
def main():
    args = parser.parse_args()

    global results_path
    results_path = os.path.join('evaluations', args.evaluation_name)
    mkdir(results_path)
    options = vars(args)
    save_options_dir = os.path.join(results_path, 'options.txt')

    with open(save_options_dir, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for k, v in sorted(options.items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')

    printer = pprint.PrettyPrinter()
    printer.pprint(options)

    # Create model
    print("=> creating model '{}'".format(args.arch))

    if args.arch == 'default_convnet':
        model = ConvNet()
    else:
        model = models.__dict__[args.arch]()

        if args.out_dim is not None:
            lin = nn.Linear(model.fc.in_features, args.out_dim)
            model.fc = lin
        else:
            model.fc = Identity()

    # Load checkpoint
    if os.path.isfile(args.checkpoint):
        print("=> loading checkpoint '{}'".format(args.checkpoint))
        checkpoint = torch.load(args.checkpoint)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' )".format(args.checkpoint))
    else:
        print("=> no checkpoint found at '{}'".format(args.checkpoint))

    if not args.cpu:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)

    model.eval()

    cudnn.benchmark = True

    # Testing data
    test_dataset = MiniImageNet('test', args.splits_path)

    test_sampler = EpisodicBatchSampler(test_dataset.labels, args.n_episodes, args.n_way, args.n_support + args.n_query)
    test_loader = DataLoader(dataset=test_dataset, batch_sampler=test_sampler,
                             num_workers=args.workers, pin_memory=True)

    test(test_loader, model, args)
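In the PyTorch examples in this section, Identity is not the ORM model above but a pass-through nn.Module: assigning model.fc = Identity() removes the classification head so the backbone returns raw features. A minimal sketch of such a module (recent PyTorch ships the same thing as torch.nn.Identity):

import torch
import torch.nn as nn

class Identity(nn.Module):
    """No-op module: forward() returns its input unchanged."""

    def forward(self, x):
        return x

if __name__ == '__main__':
    x = torch.randn(2, 512)
    print(Identity()(x).shape)   # torch.Size([2, 512])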
Example #6
 def test_class_and_subclass(self):
     """
     tests that instances are of Identity class
     and are a subclass of BaseModel class
     """
     new_obj = Identity()
     # tests that the new instance is of type Identity
     self.assertIs(type(new_obj), Identity)
     # tests that the new instance is a subclass of BaseModel
     self.assertIsInstance(new_obj, BaseModel)
Example #7
 def test_str_method(self):
     """
     tests the __str__ method returns the correct format
     this method is inherited from BaseModel, but should show the Identity class
     """
     # creates new instance of Identity and saves variables
     new_obj = Identity()
     obj_id = new_obj.id
     obj_dict = new_obj.__dict__
     # tests the string representation of object is formatted correctly
     self.assertEqual(str(new_obj),
                      "[Identity.{}] {}".format(obj_id, obj_dict))
Example #8
 def test_delete_identity(self):
     """
     tests that the storage.delete() method removes an obj from the database
     and commits the change, for an object of the Identity class
     """
     # connect to MySQL database through MySQLdb and get initial count
     db = connect(host=ION_MYSQL_HOST,
                  user=ION_MYSQL_USER,
                  passwd=ION_MYSQL_PWD,
                  db=ION_MYSQL_DB)
     cur = db.cursor()
     cur.execute("""SELECT * FROM identities""")
     objs_for_count1 = cur.fetchall()
     # creates new instance of Identity
     new_obj = Identity()
     # tests that the new object is of type Identity
     self.assertIs(type(new_obj), Identity)
     # adds all attributes required for testing
     # (id should be set by primary key)
     # (created_at, updated_at should be set by datetime)
     new_obj.name = "test_name"
     # save the object with BaseModel save method
     # save method calls storage.new() and storage.save()
     new_obj.save()
     # closes connection to database and restarts connection with MySQLdb
     cur.close()
     db.close()
     db = connect(host=ION_MYSQL_HOST,
                  user=ION_MYSQL_USER,
                  passwd=ION_MYSQL_PWD,
                  db=ION_MYSQL_DB)
     cur = db.cursor()
     cur.execute("""SELECT * FROM identities""")
     objs_for_count2 = cur.fetchall()
     # tests that there is one more obj saved to identities table in db
     self.assertEqual(len(objs_for_count1) + 1, len(objs_for_count2))
     # delete the object with BaseModel delete method
     # delete instance method calls storage.delete() and storage.save()
     new_obj.delete()
     # closes connection to database and restarts connection with MySQLdb
     cur.close()
     db.close()
     db = connect(host=ION_MYSQL_HOST,
                  user=ION_MYSQL_USER,
                  passwd=ION_MYSQL_PWD,
                  db=ION_MYSQL_DB)
     cur = db.cursor()
     cur.execute("""SELECT * FROM identities""")
     objs_for_count3 = cur.fetchall()
     # tests that there is one less obj in identities table in db
     self.assertEqual(len(objs_for_count2) - 1, len(objs_for_count3))
     self.assertEqual(len(objs_for_count1), len(objs_for_count3))
     # closes the connection
     cur.close()
     db.close()
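The ION_MYSQL_* constants used for the raw MySQLdb connection are not defined in the snippet; a plausible setup reads them from the environment. The variable names mirror the constants, but the environment-variable source and the default values are assumptions:

import os

ION_MYSQL_HOST = os.getenv('ION_MYSQL_HOST', 'localhost')   # assumed defaults
ION_MYSQL_USER = os.getenv('ION_MYSQL_USER', 'ion_test')
ION_MYSQL_PWD = os.getenv('ION_MYSQL_PWD', '')
ION_MYSQL_DB = os.getenv('ION_MYSQL_DB', 'ion_test_db')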
Example #9
 def test_all_identities_count(self):
     """
     tests all method retrieves all objects when class is Identity
     """
     # connect to MySQL database through MySQLdb and get initial count
     db = connect(host=ION_MYSQL_HOST,
                  user=ION_MYSQL_USER,
                  passwd=ION_MYSQL_PWD,
                  db=ION_MYSQL_DB)
     cur = db.cursor()
     cur.execute("""SELECT * FROM identities""")
     identity_objs = cur.fetchall()
     cur.execute("""SELECT * FROM profiles""")
     profile_objs = cur.fetchall()
     cur.execute("""SELECT * FROM skills""")
     skills_objs = cur.fetchall()
     total_count = len(identity_objs) + len(profile_objs) + len(skills_objs)
     total_identity_count = len(identity_objs)
     # call storage.all() method, both with and without class specified
     all_objs = storage.all()
     count1 = len(all_objs.keys())
     all_identity_objs = storage.all(Identity)
     identity_count1 = len(all_identity_objs.keys())
     # tests that counts from all method match current database
     self.assertEqual(total_count, count1)
     self.assertEqual(total_identity_count, identity_count1)
     # creates new Identity obj to test with
     new_obj = Identity()
     # adds all attributes required for testing
     # (id should be set by primary key)
     # (created_at, updated_at should be set by datetime)
     new_obj.name = "test_name"
     # saves new object to the database
     new_obj.save()
     # re-call storage.all() method
     all_objs = storage.all()
     count2 = len(all_objs.keys())
     all_identity_objs = storage.all(Identity)
     identity_count2 = len(all_identity_objs.keys())
     # tests that counts increased by 1
     self.assertEqual(count1 + 1, count2)
     self.assertEqual(identity_count1 + 1, identity_count2)
     # deletes new object from the database
     new_obj.delete()
     # re-call storage.all() method
     all_objs = storage.all()
     count3 = len(all_objs.keys())
     all_identity_objs = storage.all(Identity)
     identity_count3 = len(all_identity_objs.keys())
     # tests that count decreased by 1
     self.assertEqual(count2 - 1, count3)
     self.assertEqual(count1, count3)
     self.assertEqual(identity_count2 - 1, identity_count3)
     self.assertEqual(identity_count1, identity_count3)
Example #10
 def test_all_identities_dict(self):
     """
     tests return of all method when class is Identity
     """
     # connect to MySQL database through MySQLdb and get initial count
     db = connect(host=ION_MYSQL_HOST,
                  user=ION_MYSQL_USER,
                  passwd=ION_MYSQL_PWD,
                  db=ION_MYSQL_DB)
     cur = db.cursor()
     cur.execute("""SELECT * FROM identities""")
     identity_objs = cur.fetchall()
     total_identity_count = len(identity_objs)
     # call storage.all() method
     all_identity_objs = storage.all(Identity)
     identity_count1 = len(all_identity_objs.keys())
     # tests that all method returns same count of Identity objects
     self.assertEqual(total_identity_count, identity_count1)
     # tests that all method returns dictionary
     self.assertIsInstance(all_identity_objs, dict)
     # creates new Identity obj to test with
     new_obj = Identity()
     # adds all attributes required for testing
     # (id should be set by primary key)
     # (created_at, updated_at should be set by datetime)
     new_obj.name = "test_name"
     # saves new object to the database
     new_obj.save()
     # re-call storage.all() method and test that count increased by 1
     all_identity_objs = storage.all(Identity)
     identity_count2 = len(all_identity_objs.keys())
     self.assertEqual(identity_count1 + 1, identity_count2)
     # tests that newly created obj is in dictionary with correct key
     self.assertIsInstance(storage.all(), dict)
     dict_key = "{}.{}".format("Identity", new_obj.id)
     self.assertIn(dict_key, storage.all())
     # get obj attributes from storage.all() dictionary using obj id
     # test that retrieved attributes match expected values
     obj_class = storage.all().get("Identity.{}".format(
         new_obj.id)).__class__.__name__
     self.assertEqual("Identity", obj_class)
     obj_name = storage.all().get("Identity.{}".format(new_obj.id)).name
     self.assertEqual("test_name", obj_name)
     # delete new object from the database
     new_obj.delete()
     # re-call storage.all() method and test that count decreased by 1
     all_identity_objs = storage.all(Identity)
     identity_count3 = len(all_identity_objs.keys())
     self.assertEqual(identity_count2 - 1, identity_count3)
     self.assertEqual(identity_count1, identity_count3)
     # tests that new object is no longer in return dictionary
     self.assertNotIn(dict_key, storage.all())
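Examples #9 and #10 together pin down the contract of storage.all(): it returns a dict keyed "<ClassName>.<id>", and an optional class argument filters the result to that class. The runnable toy below mirrors that contract; it is not the project's engine, just an illustration of the key format and the filtering:

class FakeStorage:
    """In-memory stand-in for the storage engine's new()/all() contract."""

    def __init__(self):
        self._objects = {}            # "<ClassName>.<id>" -> object

    def new(self, obj):
        self._objects["{}.{}".format(type(obj).__name__, obj.id)] = obj

    def all(self, cls=None):
        if cls is None:
            return dict(self._objects)
        prefix = cls.__name__ + "."
        return {k: v for k, v in self._objects.items() if k.startswith(prefix)}

class Identity:
    def __init__(self, id):
        self.id = id

store = FakeStorage()
store.new(Identity("abc-123"))
print(list(store.all(Identity)))      # ['Identity.abc-123']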
Example #11
 def test_updated_datetime_attributes(self):
     """
     tests that the datetime attribute updated_at changes
     when the save() method is called
     """
     first_time = datetime.now()
     new_obj = Identity()
     second_time = datetime.now()
     # tests if object's created_at time is between timestamps
     self.assertTrue(first_time <= new_obj.created_at <= second_time)
     # tests if object's updated_at is within the same timestamps
     self.assertTrue(first_time <= new_obj.updated_at <= second_time)
     # gets timestamps of current attributes and pauses a moment
     original_created_at = new_obj.created_at
     original_updated_at = new_obj.updated_at
     sleep(1)
     # adds required attributes so the object can be saved; saves object
     new_obj.name = "test_name"
     new_obj.save()
     # tests that the object's updated_at has changed and is later
     self.assertNotEqual(original_updated_at, new_obj.updated_at)
     self.assertTrue(original_updated_at < new_obj.updated_at)
     # tests that only the object's updated_at datetime has changed
     self.assertEqual(original_created_at, new_obj.created_at)
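Examples #2, #8 and #11 together describe what the inherited save() does: refresh updated_at, register the object with storage.new(), and commit with storage.save(). A minimal sketch of that behaviour (an assumption about BaseModel, not project code):

from datetime import datetime

class BaseModel:
    """Hypothetical sketch limited to save()."""

    def save(self):
        # refresh the modification timestamp, then hand the object to the
        # storage engine and commit; 'storage' is the project's engine
        # singleton (assumed import: from models import storage)
        self.updated_at = datetime.now()
        storage.new(self)
        storage.save()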
Example #12
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu
    cudnn.benchmark = True

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # Create model
    if args.arch == 'default_convnet':
        model = ConvNet()
    else:
        if args.pretrained:
            print("=> using pre-trained model '{}'".format(args.arch))
            model = models.__dict__[args.arch](pretrained=True)
        else:
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()

        if args.out_dim is not None:
            lin = nn.Linear(model.fc.in_features, args.out_dim)
            weights_init_xavier(lin)
            model.fc = lin
        else:
            model.fc = Identity()

    print('Number of parameters: ',
          sum([p.numel() for p in model.parameters()]))

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(args.workers / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # Define optimizer
    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), args.lr)

    else:
        raise ValueError('Optimizer should be "sgd" or "adam"')

    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=args.step_size,
                                                   gamma=args.gamma)

    # Optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data loading code
    train_dataset = MiniImageNet('train', args.splits_path)
    train_sampler = EpisodicBatchSampler(train_dataset.labels,
                                         args.n_episodes_train,
                                         args.n_way_train,
                                         args.n_support + args.n_query_train)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_sampler=train_sampler,
                              num_workers=args.workers,
                              pin_memory=True)

    val_dataset = MiniImageNet('val', args.splits_path)
    val_sampler = EpisodicBatchSampler(val_dataset.labels, args.n_episodes_val,
                                       args.n_way_val,
                                       args.n_support + args.n_query_val)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_sampler=val_sampler,
                            num_workers=args.workers,
                            pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        lr_scheduler.step()
        if args.distributed:
            train_sampler.set_epoch(epoch)

        # Train for one epoch
        loss_t, acc_t = train(train_loader, model, optimizer, epoch, args)

        # Evaluate on validation set
        loss_val, acc1 = validate(val_loader, model, args)

        dict_metrics = {
            'loss_training': loss_t,
            'loss_validation': loss_val,
            'acc_training': acc_t,
            'acc_validation': acc1
        }

        for key in dict_metrics:
            with open(os.path.join(results_dir, key + '.txt'), "a+") as myfile:
                myfile.write(str(dict_metrics[key]) + '\n')

        # Remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            print('Saving model...')
            if args.gpu is None:
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'arch': args.arch,
                        'state_dict': model.module.state_dict(),
                        'best_acc1': best_acc1,
                        'optimizer': optimizer.state_dict(),
                    }, is_best, results_dir)
            else:
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'best_acc1': best_acc1,
                        'optimizer': optimizer.state_dict(),
                    }, is_best, results_dir)
Example #13
def main():
    args = parser.parse_args()

    global results_path
    results_path = os.path.join('evaluations', args.evaluation_name)
    mkdir(results_path)
    options = vars(args)
    save_options_dir = os.path.join(results_path, 'options.txt')

    with open(save_options_dir, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for k, v in sorted(options.items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')

    global printer
    printer = pprint.PrettyPrinter()
    printer.pprint(options)
    # Create model
    print("=> creating model '{}'".format(args.arch))
    model = models.__dict__[args.arch]()

    if args.out_dim is not None:
        lin = nn.Linear(model.fc.in_features, args.out_dim)
        model.fc = lin
    else:
        model.fc = Identity()

    # Load checkpoint
    if os.path.isfile(args.checkpoint):
        print("=> loading checkpoint '{}'".format(args.checkpoint))
        checkpoint = torch.load(args.checkpoint)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' )".format(args.checkpoint))
    else:
        print("=> no checkpoint found at '{}'".format(args.checkpoint))

    if not args.cpu:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)

    model.eval()

    cudnn.benchmark = True

    # Data loading code
    mean = np.array(args.subtract_mean)
    std = np.array(args.subtract_std)

    if mean[0] > 1 or mean[1] > 1 or mean[2] > 1:
        print(
            'One or more of the subtract mean values were above 1, dividing by 255...'
        )
        mean /= 255

    if std[0] > 1 or std[1] > 1 or std[2] > 1:
        print(
            'One or more of the subtract std values were above 1, dividing by 255...'
        )
        std /= 255

    print('Normalizing by mean of %.4f, %.4f, %.4f' %
          (mean[0], mean[1], mean[2]))
    print('Normalizing by std of %.4f, %.4f, %.4f' % (std[0], std[1], std[2]))

    # normalize = transforms.Normalize(mean=mean, std=std)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Training data
    train_directory = args.train_dir
    train_dataset = ImageFolder(
        train_directory,
        transforms.Compose([
            transforms.Resize((args.image_size, args.image_size)),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch_size,
                              num_workers=args.workers,
                              pin_memory=True,
                              shuffle=False)

    classes = train_dataset.classes

    # Testing data
    testing_directory = args.test_dir
    test_dataset = ImageFolder(
        testing_directory,
        transforms.Compose([
            transforms.Resize((args.image_size, args.image_size)),
            transforms.ToTensor(),
            normalize,
        ]))
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=args.batch_size,
                             num_workers=args.workers,
                             pin_memory=True,
                             shuffle=False)

    class_prototypes = compute_prototypes(train_loader, model, classes,
                                          args.load_prototypes,
                                          args.save_prototypes)
    class_prototypes = torch.stack(
        [values for key, values in class_prototypes.items()])
    evaluation(test_loader, class_prototypes, model, classes, args)
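compute_prototypes is not shown in the snippet; in a prototypical-network setup it averages the model's embeddings per class over the training loader. A hedged sketch of that idea, with the signature and return type inferred from the call above (the real implementation also handles loading and saving prototypes, which this sketch ignores):

import torch

@torch.no_grad()
def compute_prototypes(loader, model, classes, load_path=None, save_path=None):
    """Return {class_name: mean embedding tensor} computed from loader (sketch)."""
    sums = {c: None for c in classes}
    counts = {c: 0 for c in classes}
    for images, targets in loader:
        feats = model(images.cuda(non_blocking=True))
        for feat, target in zip(feats, targets):
            name = classes[target]
            sums[name] = feat if sums[name] is None else sums[name] + feat
            counts[name] += 1
    return {c: sums[c] / counts[c] for c in classes}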