def calculate_mode_connectivity(w1, w2, eval_loader, config):
    """Evaluate loss/accuracy along the linear path between two weight vectors.

    Interpolates w(t) = w1 + t * (w2 - w1) for t in [0, 1] and evaluates the
    model at each interpolation point on eval_loader.
    """
    net = load_model('{}/{}.pth'.format(config['exp_dir'], 'init')).to(DEVICE)
    loss_history, acc_history, ts = [], [], []
    for t in np.arange(0.0, 1.01, 0.025):
        ts.append(t)
        # Overwrite the network's parameters with the interpolated weights.
        net = assign_weights(net, w1 + t*(w2-w1)).to(DEVICE)
        metrics = eval_single_epoch(net, eval_loader)
        loss_history.append(metrics['loss'])
        acc_history.append(metrics['accuracy'])
    return loss_history, acc_history, ts
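A sketch of how this might be invoked; `flatten_params`, `eval_loader`, and the checkpoint names below are assumptions, not part of the example:

# Hypothetical usage: w1/w2 are flat parameter vectors from two trained runs.
config = {'exp_dir': 'experiments/run1'}
w1 = flatten_params(load_model('experiments/run1/task1.pth'))  # flatten_params is assumed
w2 = flatten_params(load_model('experiments/run1/task2.pth'))
losses, accs, ts = calculate_mode_connectivity(w1, w2, eval_loader, config)
print('worst loss along the path: {:.4f}'.format(max(losses)))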
Example #2
def get_good_intention(text_vector):
    """
    Return the intention predicted from the received text vector.

    :param text_vector: <list> vectorized representation of the input text
    :return: <Enum> the matching entry from Intentions.good_intentions
    """
    recognizer = load_model(
        'luci/models/good_intentions')  # TODO get path from settings

    return Intentions.good_intentions.get(recognizer.predict([text_vector])[0])
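A possible call site; the `vectorize` helper below is an assumption standing in for whatever feature extraction the project applies before prediction:

text_vector = vectorize("thanks, that really helped!")  # hypothetical vectorizer
intention = get_good_intention(text_vector)
print(intention)  # a member of Intentions.good_intentions, or None if unmapped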
Example #3
    def post(self, request, *args, **kwargs):
        serializer_data = SoftDeleteSerializer(data=request.data)
        if not serializer_data.is_valid():
            # Reject invalid payloads with the serializer errors.
            return Response(data={"data": serializer_data.errors, "success": False}, status=400)

        validated = serializer_data.validated_data
        context = validated["context"]
        uid = validated["uid"]
        # Resolve the concrete model class named by "context".
        ModelClass = load_model(context)

        model_object = ModelClass.objects.filter(uid=uid, is_deleted=False).first()
        if model_object is None:
            response = "Data not found"
            status_code = 400
        else:
            model_object.soft_delete()
            response = "Data deleted successfully"
            status_code = 200

        return Response(data={"data": response, "success": status_code == 200}, status=status_code)
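`SoftDeleteSerializer` is not shown; a minimal sketch consistent with the two fields the view reads would be:

from rest_framework import serializers

class SoftDeleteSerializer(serializers.Serializer):
    # Field types are assumptions; only the key names are visible in the view.
    context = serializers.CharField()
    uid = serializers.CharField()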
Example #4
def main(args, callback=None, upload_checkpoint=False):
    print(vars(args))
    print('upload_checkpoint', upload_checkpoint)
    if not os.path.exists(args.res_dir):
        os.makedirs(args.res_dir)
    with open(os.path.join(args.res_dir, 'params.json'), 'w') as f:
        json.dump(vars(args), f, indent=4)

    if not os.path.exists(os.path.dirname(args.checkpoint)):
        os.makedirs(os.path.dirname(args.checkpoint))
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    if args.seed >= 0:
        torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    # Only referenced by the commented-out test loader below; the train loader
    # passes num_workers=0 explicitly.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    transform_list = [
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[STD, STD, STD])
    ]
    # Optionally prepend a center crop when the net config requests one.
    if args.net_params['crop'] > 0:
        transform_list = [transforms.CenterCrop(args.net_params['crop'])] + transform_list
    data_transform = transforms.Compose(transform_list)
    face_dataset_train = datasets.ImageFolder(root=args.dataset,
                                              transform=data_transform)
    # face_dataset_test = datasets.ImageFolder(root='test',
    #                                           transform=data_transform)
    train_loader = torch.utils.data.DataLoader(face_dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=0)

    # test_loader = torch.utils.data.DataLoader(face_dataset_test,
    #     batch_size=args.test_batch_size, shuffle=True, **kwargs)
    # args.checkpoint = "cnn3.pth"

    # Infer the input shape from one batch so the encoder can size its layers.
    model = Encoder(args.net_params,
                    next(iter(train_loader))[0].shape).to(device)
    if args.optimizer == 'sgd':
        if args.lr is None:
            args.lr = 0.07
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              nesterov=True,
                              momentum=0.8,
                              weight_decay=0)
    elif args.optimizer == 'adam':
        if args.lr is None:
            args.lr = 0.00075
        optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0)
    else:
        raise NotImplementedError
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=1,
                                          gamma=args.decay_lr)
    # Resume from an existing checkpoint; load_model returns the stored epoch.
    if os.path.exists(args.checkpoint):
        epoch_start = load_model(args.checkpoint, model, optimizer,
                                 scheduler) + 1
    else:
        epoch_start = 1
    process_upload = None
    for epoch in range(epoch_start, args.epochs + 1):
        print('learning rate {:.5f}'.format(get_lr(optimizer)))
        process_upload = train(args, model, device, train_loader, optimizer,
                               epoch, upload_checkpoint, callback,
                               process_upload, scheduler)
        # Wait for any in-flight checkpoint upload before overwriting the file.
        if process_upload is not None:
            process_upload.join()
        save_model(args.checkpoint, epoch, model, optimizer, scheduler)
        if callback is not None:
            callback(False)
            if upload_checkpoint:
                # Upload the fresh checkpoint from a background process.
                process_upload = start_process(callback, (True, ))

        scheduler.step()
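The `save_model`/`load_model` checkpoint helpers are defined elsewhere; a plausible sketch matching how they are called here (with `load_model` returning the stored epoch) is:

def save_model(path, epoch, model, optimizer, scheduler):
    # Persist the epoch counter plus every state dict needed to resume.
    torch.save({'epoch': epoch,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict()}, path)


def load_model(path, model, optimizer, scheduler):
    # Restore state in place and report the epoch the checkpoint was saved at.
    ckpt = torch.load(path, map_location='cpu')
    model.load_state_dict(ckpt['model'])
    optimizer.load_state_dict(ckpt['optimizer'])
    scheduler.load_state_dict(ckpt['scheduler'])
    return ckpt['epoch']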
Example #5
    def create(self):
        self.print("creating application...")

        # create logger
        try:
            log_cfg = self.config["logger"]
            log_name = log_cfg["name"]
            log_level = log_cfg["level"]

            logger = logging.getLogger(log_name)
            logger.setLevel(log_level)

            handler = TimedRotatingFileHandler(
                filename=log_cfg["path"],
                when=log_cfg["when"],
                interval=log_cfg["interval"],
                utc=True)
            handler.setLevel(log_level)
            formatter = logging.Formatter('%(asctime)s [%(levelname)s] > %(message)s')
            formatter.converter = time.gmtime
            handler.setFormatter(formatter)
            logger.addHandler(handler)

            self.print("logger: OK")
            self.logger = logger
        except Exception:
            self.print("logger: FAILED")
            raise

        # create Flask app
        try:
            app_cfg = self.config["app"]
            name = app_cfg["name"]
            app = Flask(name)
            app.logger.disabled = True
            logging.getLogger('werkzeug').disabled = True

            self.print("flask: OK")
            self.app = app
        except Exception:
            self.print("flask: FAILED")
            raise

        # create classification model
        try:
            model_name = app_cfg["model"]
            model_cfg = self.config["models"][model_name]
            model = load_model(model_name, model_cfg, logger=self.logger)

            self.print("model: OK")
            self.model = model
        except Exception:
            self.print("model: FAILED")
            raise

        # initialize image store
        try:
            image_store = ImageStore(self.config)

            self.print("image store: OK")
            self.image_store = image_store
        except Exception:
            self.print("image store: FAILED")
            raise

        self.print("application created")
        self.log("application created")
        self.log(f"working model: {model.info()}")

        return self.app
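The configuration keys this method reads (`logger`, `app`, `models`) suggest a layout like the following; the values are illustrative only, and the per-model config shape is project-specific:

config = {
    "logger": {
        "name": "classifier",
        "level": "INFO",
        "path": "logs/app.log",   # TimedRotatingFileHandler target
        "when": "midnight",
        "interval": 1,
    },
    "app": {
        "name": "classifier-api",
        "model": "default",       # key into config["models"]
    },
    "models": {
        "default": {"path": "models/default.pkl"},  # illustrative shape
    },
}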
Example #6
def main(args, callback=None, upload_checkpoint=False):
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")
    print('device', device)
    kwargs = {'num_workers': 2, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(os.path.join(args.dataset, 'cifar10'),
                         train=True,
                         download=True,
                         transform=transforms.Compose([
                             transforms.RandomCrop(32, padding=4),
                             transforms.RandomHorizontalFlip(),
                             transforms.ToTensor(),
                             transforms.Normalize((0.5, 0.5, 0.5),
                                                  (0.5, 0.5, 0.5))
                         ])),
        batch_size=args.batch_size,
        shuffle=True,
        **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10(os.path.join(args.dataset, 'cifar10'),
                         train=False,
                         transform=transforms.Compose([
                             transforms.ToTensor(),
                             transforms.Normalize((0.5, 0.5, 0.5),
                                                  (0.5, 0.5, 0.5))
                         ])),
        batch_size=args.test_batch_size,
        shuffle=True,
        **kwargs)

    num_out = num_classes  # module-level constant: 10 classes for CIFAR-10
    print(vars(args))
    print(args.net_params)
    # Infer the input shape from one batch so the network can size its layers.
    model = networks.Net(num_out, args.net_params,
                         next(iter(train_loader))[0].shape).to(device)
    if args.optimizer.lower() == "adam":
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.weight_decay)
    elif args.optimizer.lower() == "sgd":
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              weight_decay=args.weight_decay,
                              momentum=0.8)
    else:
        raise NotImplementedError
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.98)
    utils.load_model(args.checkpoint, model, optimizer, scheduler)
    best_acc = 0.0
    for epoch in range(1, args.epochs + 1):
        print('learning rate {:.5f}'.format(utils.get_lr(optimizer)))
        train(args, model, device, train_loader, optimizer, epoch, scheduler)
        acc = test(args, model, device, test_loader)
        best_acc = max(best_acc, acc)
        scheduler.step()

    results_path = os.path.join(args.res_dir, 'results_cifar10.json')
    os.makedirs(os.path.dirname(results_path), exist_ok=True)
    if os.path.exists(results_path):
        with open(results_path, 'r') as f:
            results = json.load(f)
    else:
        results = {'res': []}
    # Record both the best and the final test accuracy for this run.
    results['res'].append([vars(args), {'best': best_acc, 'last': acc}])
    with open(results_path, 'w') as f:
        json.dump(results, f, indent=4)
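`main` reads a dozen attributes from `args`; a minimal invocation covering all of them (values illustrative) could be:

from argparse import Namespace

args = Namespace(
    no_cuda=False, seed=1,
    dataset='data', batch_size=128, test_batch_size=256,
    net_params={},                      # forwarded to networks.Net
    optimizer='sgd', lr=0.05, weight_decay=5e-4,
    checkpoint='checkpoints/cifar10.pth',
    epochs=30, res_dir='results',
)
main(args)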