Code Example #1
    def create_model(ema=False):
        if args.type == 1:
            model = Classifier(num_classes=10)
        else:
            model = WideResNet(num_classes=10)
        model = model.cuda()

        if ema:
            for param in model.parameters():
                param.detach_()

        return model
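
A minimal usage sketch for the factory above, assuming the surrounding script defines `args`, `Classifier`, and `WideResNet` as in the original project: the EMA copy's parameters are detached, so only the student model is handed to the optimizer.

    import torch

    model = create_model()               # trainable student network on the GPU
    ema_model = create_model(ema=True)   # detached copy, updated by an EMA rule rather than backprop

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9)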
Code Example #2
    def __init__(self, model_name, vocab, lr, lr_decay, batch_size=64):
        """
        PyTorch Lightning module that creates the overall model.
        Inputs:
            model_name - String denoting what encoder class to use.  Either 'AWE', 'UniLSTM', 'BiLSTM', or 'BiLSTMMax'
            vocab - Vocabulary from alignment between SNLI dataset and GloVe vectors
            lr - Learning rate to use for the optimizer
            lr_decay - Learning rate decay factor to use each epoch
            batch_size - Size of the batches. Default is 64
        """
        super().__init__()
        self.save_hyperparameters()

        # create an embedding layer for the vocabulary embeddings
        self.glove_embeddings = nn.Embedding.from_pretrained(vocab.vectors)

        # check which encoder model to use
        if model_name == 'AWE':
            self.encoder = AWEEncoder()
            self.classifier = Classifier()
        elif model_name == 'UniLSTM':
            self.encoder = UniLSTM()
            self.classifier = Classifier(input_dim=4 * 2048)
        elif model_name == 'BiLSTM':
            self.encoder = BiLSTM()
            self.classifier = Classifier(input_dim=4 * 2 * 2048)
        else:
            self.encoder = BiLSTMMax()
            self.classifier = Classifier(input_dim=4 * 2 * 2048)

        # create the loss function
        self.loss_function = nn.CrossEntropyLoss()

        # create instance to save the last validation accuracy
        self.last_val_acc = None
Code Example #3
 def create(self, classifier=Classifier()):
     """
     (Classifier) -> (Classifier)
     """
     classifierDB = ClassifierDB(classifier=classifier)
     session = self.session_factory()
     session.add(classifierDB)
     session.flush()
     session.refresh(classifierDB)
     session.commit()
     return Classifier(
         classifierDB.id,
         Plant(classifierDB.plant.id, classifierDB.plant.scientificName,
               classifierDB.plant.commonName), classifierDB.tag,
         classifierDB.path)
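
A hypothetical usage sketch of this DAO (the `ClassifierDAO` name and the `session_factory` wiring are assumptions; `Classifier` and `Plant` are constructed positionally as in the surrounding examples): persist a classifier and read it back by id.

    dao = ClassifierDAO(session_factory)          # assumed constructor taking a session factory
    saved = dao.create(Classifier(0, Plant(1, 'Coffea arabica', 'Coffee'),
                                  'v1', '/models/coffee/v1.pb'))
    fetched = dao.searchByID(saved.id)            # see the searchByID example below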
Code Example #4
def train_classifier(x_data, y_data, auto_encoder):
    model = Classifier(args).to(device)

    x_data, y_data = Utils.SuffleData(x_data, y_data, len(x_data))

    x_train = x_data[:args.training_len]
    y_train = y_data[:args.training_len]

    x_train, y_train = Utils.generate_data(x_data, x_train, y_train, auto_encoder)

    x_test = x_data[args.training_len:]
    y_test = y_data[args.training_len:]
    test_len = len(x_test)

    model.learn(x_train, y_train, test_len, model, x_test, y_test, args.batch_size)
Code Example #5
 def __init__(self, keep_prob, \
              batch_size=100, num_channels=1, learning_rate=0.001, fce=False, num_classes_per_set=5, \
              num_samples_per_class=1, nClasses=0, image_size=28):
     super(MatchingNetwork, self).__init__()
     """
     Builds a matching network, the training and evaluation ops as well as data augmentation routines.
     :param keep_prob: A tf placeholder of type tf.float32 denotes the amount of dropout to be used
     :param batch_size: The batch size for the experiment
     :param num_channels: Number of channels of the images
     :param is_training: Flag indicating whether we are training or evaluating
     :param rotate_flag: Flag indicating whether to rotate the images
     :param fce: Flag indicating whether to use full context embeddings (i.e. apply an LSTM on the CNN embeddings)
     :param num_classes_per_set: Integer indicating the number of classes per set
     :param num_samples_per_class: Integer indicating the number of samples per class
     :param nClasses: total number of classes. It changes the output size of the classifier g with a final FC layer.
     :param image_size: size of the input image. It is needed in case we want to create the last FC classification layer.
     """
     self.batch_size = batch_size
     self.fce = fce
     self.g = Classifier(layer_size=64,
                         num_channels=num_channels,
                         nClasses=nClasses,
                         image_size=image_size)
     if fce:
         self.lstm = BidirectionalLSTM(layer_sizes=[32],
                                       batch_size=self.batch_size,
                                       vector_dim=self.g.outSize)
     self.dn = DistanceNetwork()
     self.classify = AttentionalClassify()
     self.keep_prob = keep_prob
     self.num_classes_per_set = num_classes_per_set
     self.num_samples_per_class = num_samples_per_class
     self.learning_rate = learning_rate
Code Example #6
 def create(self, analysis=Analysis()):
     """
     (Analysis) -> (Analysis)
     Add analysis to database
     """
     analysisDB = AnalysisDB(analysis=analysis)
     session = self.session_factory()
     session.add(analysisDB)
     session.flush()
     session.refresh(analysisDB)
     session.commit()
     return Analysis(
         analysisDB.id,
         image=Image(
             analysisDB.image.id,
             Disease(
                 analysisDB.image.disease.id,
                 Plant(analysisDB.image.disease.plant.id,
                       analysisDB.image.disease.plant.scientificName,
                       analysisDB.image.disease.plant.commonName),
                 analysisDB.image.disease.scientificName,
                 analysisDB.image.disease.commonName), analysisDB.image.url,
             analysisDB.image.description, analysisDB.image.source,
             analysisDB.image.size),
         classifier=Classifier(
             analysisDB.classifier.id,
             Plant(analysisDB.classifier.plant.id,
                   analysisDB.classifier.plant.scientificName,
                   analysisDB.classifier.plant.commonName),
             analysisDB.classifier.tag, analysisDB.classifier.path),
         user=User(id=analysisDB.user.id,
                   idType=analysisDB.user.idType,
                   email=analysisDB.user.email,
                   username=analysisDB.user.username))
Code Example #7
 def searchByID(self, id):
     """
     (Int) -> (Classifier)
     """
     session = self.session_factory()
     classifierDB = session.query(ClassifierDB).get(id)
     return Classifier(
         classifierDB.id,
         Plant(classifierDB.plant.id, classifierDB.plant.scientificName,
               classifierDB.plant.commonName), classifierDB.tag,
         classifierDB.path)
Code Example #8
 def search(self, classifier=Classifier(), pageSize=10, offset=0):
     """
     (Classifier, pageSize, offset) -> {'total': int, 'content':[Classifier]}
     """
     session = self.session_factory()
     query = session.query(ClassifierDB).filter(
         and_(ClassifierDB.tag.like('%' + classifier.tag + '%'),
              ClassifierDB.path.like('%' + classifier.path + '%')))
     content = query.slice(offset, pageSize).all()
     total = query.count()
     classifiers = []
     for classifierDB in content:
         classifiers.append(
             Classifier(
                 classifierDB.id,
                 Plant(classifierDB.plant.id,
                       classifierDB.plant.scientificName,
                       classifierDB.plant.commonName), classifierDB.tag,
                 classifierDB.path))
     dic = {'total': total, 'content': classifiers}
     return dic
Code Example #9
    def __init__(self, model, ema_model, run_type=0, alpha=0.999):
        self.model = model
        self.ema_model = ema_model
        self.alpha = alpha
        if run_type == 1:
            self.tmp_model = Classifier(num_classes=10).cuda()
        else:
            self.tmp_model = WideResNet(num_classes=10).cuda()
        self.wd = 0.02 * args.lr

        for param, ema_param in zip(self.model.parameters(),
                                    self.ema_model.parameters()):
            ema_param.data.copy_(param.data)
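
A common companion to this constructor is an update step that blends the student weights into the EMA copy each iteration; the sketch below is an assumption about how `alpha` and `wd` are typically used (mean-teacher-style weight EMA plus decoupled weight decay), not code taken from the project.

    def step_sketch(self):
        one_minus_alpha = 1.0 - self.alpha
        for param, ema_param in zip(self.model.parameters(),
                                    self.ema_model.parameters()):
            # exponential moving average of the student weights
            ema_param.data.mul_(self.alpha).add_(param.data, alpha=one_minus_alpha)
            # decoupled weight decay applied to the student weights
            param.data.mul_(1.0 - self.wd)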
Code Example #10
 def create(self, analysisResult=AnalysisResult()):
     """
     (AnalysisResult) -> (AnalysisResult)
     Add analysis result to database
     """
     analysisResultDB = AnalysisResultDB(analysisResult=analysisResult)
     session = self.session_factory()
     session.add(analysisResultDB)
     session.flush()
     session.commit()
     session.refresh(analysisResultDB)
     return AnalysisResult(
         analysisResultDB.id,
         Analysis(
             analysisResultDB.analysis.id,
             Image(
                 analysisResultDB.analysis.image.id,
                 Disease(
                     analysisResultDB.analysis.image.disease.id,
                     Plant(
                         analysisResultDB.analysis.image.disease.plant.id,
                         analysisResultDB.analysis.image.disease.plant.
                         scientificName, analysisResultDB.analysis.image.
                         disease.plant.commonName),
                     analysisResultDB.analysis.image.disease.scientificName,
                     analysisResultDB.analysis.image.disease.commonName),
                 analysisResultDB.analysis.image.url,
                 analysisResultDB.analysis.image.description,
                 analysisResultDB.analysis.image.source,
                 analysisResultDB.analysis.image.size),
             Classifier(
                 analysisResultDB.analysis.classifier.id,
                 Plant(
                     analysisResultDB.analysis.classifier.plant.id,
                     analysisResultDB.analysis.classifier.plant.
                     scientificName,
                     analysisResultDB.analysis.classifier.plant.commonName),
                 analysisResultDB.analysis.classifier.tag,
                 analysisResultDB.analysis.classifier.path),
             user=User(id=analysisResultDB.analysis.user.id,
                       idType=analysisResultDB.analysis.user.idType,
                       email=analysisResultDB.analysis.user.email,
                       username=analysisResultDB.analysis.user.username)),
         Disease(
             analysisResultDB.disease.id,
             Plant(analysisResultDB.disease.plant.id,
                   analysisResultDB.disease.plant.scientificName,
                   analysisResultDB.disease.plant.commonName),
             analysisResultDB.disease.scientificName,
             analysisResultDB.disease.commonName), analysisResultDB.score,
         analysisResultDB.frame)
Code Example #11
    def searchByID(self, id):
        """
        (int) -> (AnalysisResult)
        Search analysis result by ID
        """
        session = self.session_factory()
        analysisResultDB = session.query(AnalysisResultDB).get(id)
        if (analysisResultDB is None):
            raise Exception("AnalysisResults not found!")

        return AnalysisResult(
            analysisResultDB.id,
            Analysis(
                analysisResultDB.analysis.id,
                Image(
                    analysisResultDB.analysis.image.id,
                    Disease(
                        analysisResultDB.analysis.image.disease.id,
                        Plant(
                            analysisResultDB.analysis.image.disease.plant.id,
                            analysisResultDB.analysis.image.disease.plant.
                            scientificName, analysisResultDB.analysis.image.
                            disease.plant.commonName),
                        analysisResultDB.analysis.image.disease.scientificName,
                        analysisResultDB.analysis.image.disease.commonName),
                    analysisResultDB.analysis.image.url,
                    analysisResultDB.analysis.image.description,
                    analysisResultDB.analysis.image.source,
                    analysisResultDB.analysis.image.size),
                Classifier(
                    analysisResultDB.analysis.classifier.id,
                    Plant(
                        analysisResultDB.analysis.classifier.plant.id,
                        analysisResultDB.analysis.classifier.plant.
                        scientificName,
                        analysisResultDB.analysis.classifier.plant.commonName),
                    analysisResultDB.analysis.classifier.tag,
                    analysisResultDB.analysis.classifier.path),
                user=User(id=analysisResultDB.analysis.user.id,
                          idType=analysisResultDB.analysis.user.idType,
                          email=analysisResultDB.analysis.user.email,
                          username=analysisResultDB.analysis.user.username)),
            Disease(
                analysisResultDB.disease.id,
                Plant(analysisResultDB.disease.plant.id,
                      analysisResultDB.disease.plant.scientificName,
                      analysisResultDB.disease.plant.commonName),
                analysisResultDB.disease.scientificName,
                analysisResultDB.disease.commonName), analysisResultDB.score,
            analysisResultDB.frame)
Code Example #12
 def update(self, classifier=Classifier()):
     """
     (Classifier) -> (Classifier)
     """
     session = self.session_factory()
     classifierDB = session.query(ClassifierDB).filter_by(
         id=classifier.id).first()
     dic = {}
     if (classifierDB.plant.id != classifier.plant.id):
         dic['idPlant'] = classifier.plant.id
     if (classifierDB.tag != classifier.tag):
         dic['tag'] = classifier.tag
     if (classifierDB.path != classifier.path):
         dic['path'] = classifier.path
     if (dic != {}):
         session.query(ClassifierDB).filter_by(id=classifier.id).update(dic)
         session.commit()
         session.flush()
         session.refresh(classifierDB)
     return Classifier(
         classifierDB.id,
         Plant(classifierDB.plant.id, classifierDB.plant.scientificName,
               classifierDB.plant.commonName), classifierDB.tag,
         classifierDB.path)
Code Example #13
 def delete(self, classifier=Classifier()):
     """
     (Classifier) -> (Boolean)
     """
     status = False
     session = self.session_factory()
     classifierDB = session.query(ClassifierDB).filter_by(
         id=classifier.id).first()
     session.delete(classifierDB)
     session.commit()
     session.flush()
     if (not session.query(ClassifierDB).filter_by(
             id=classifierDB.id).count()):
         status = True
     session.close()
     return status
Code Example #14
    def searchByID(self, id):
        """
        (int) -> (Analysis)
        Search analysis by ID
        """
        session = self.session_factory()
        analysisDB = session.query(AnalysisDB).get(id)
        if (analysisDB is None):
            raise Exception("Analysis not found!")

        results = []
        for result in analysisDB.analysis_results:
            results.append(
                AnalysisResult(
                    id=result.id,
                    disease=Disease(
                        id=result.disease.id,
                        scientificName=result.disease.scientificName,
                        commonName=result.disease.commonName),
                    frame=result.frame,
                    score=result.score))
        return Analysis(
            id=analysisDB.id,
            image=Image(
                analysisDB.image.id,
                Disease(
                    analysisDB.image.disease.id,
                    Plant(analysisDB.image.disease.plant.id,
                          analysisDB.image.disease.plant.scientificName,
                          analysisDB.image.disease.plant.commonName),
                    analysisDB.image.disease.scientificName,
                    analysisDB.image.disease.commonName), analysisDB.image.url,
                analysisDB.image.description, analysisDB.image.source,
                analysisDB.image.size),
            classifier=Classifier(
                analysisDB.classifier.id,
                Plant(analysisDB.classifier.plant.id,
                      analysisDB.classifier.plant.scientificName,
                      analysisDB.classifier.plant.commonName),
                analysisDB.classifier.tag, analysisDB.classifier.path),
            analysis_results=results,
            user=User(id=analysisDB.user.id,
                      idType=analysisDB.user.idType,
                      email=analysisDB.user.email,
                      username=analysisDB.user.username))
Code Example #15
def build_model(config, num_classes):
    # Build backbone
    print("Initializing model: {}".format(config.MODEL.NAME))
    if config.MODEL.NAME == 'resnet50':
        model = ResNet50(res4_stride=config.MODEL.RES4_STRIDE)
    else:
        raise KeyError("Invalid model: '{}'".format(config.MODEL.NAME))
    print("Model size: {:.5f}M".format(
        sum(p.numel() for p in model.parameters()) / 1000000.0))

    # Build classifier
    if config.LOSS.CLA_LOSS in ['crossentropy', 'crossentropylabelsmooth']:
        classifier = Classifier(feature_dim=config.MODEL.FEATURE_DIM,
                                num_classes=num_classes)
    else:
        classifier = NormalizedClassifier(feature_dim=config.MODEL.FEATURE_DIM,
                                          num_classes=num_classes)

    return model, classifier
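
A hypothetical usage of `build_model`, assuming the ResNet50 backbone returns per-image feature vectors of size `config.MODEL.FEATURE_DIM` and that `images`/`labels` form a CUDA batch:

    import torch.nn.functional as F

    model, classifier = build_model(config, num_classes=751)
    model, classifier = model.cuda(), classifier.cuda()

    features = model(images)        # (batch, FEATURE_DIM) appearance features
    logits = classifier(features)   # (batch, num_classes) identity logits
    loss = F.cross_entropy(logits, labels)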
Code Example #16
File: Analysis.py Project: WendelHime/models
 def __init__(self,
              id=0,
              image=Image(),
              classifier=Classifier(),
              analysis_results=[],
              user=User()):
     """Analisys model constructor
     Args:
         id: Integer number to identification
         image: Image object that has been used for this Analysis
         classifier: Classifier object that has been used for this Analysis
         analysis_results: A list of AnalysisResult object with all results
         user: User which perform this analysis
     """
     self.id = id
     self.image = image
     self.classifier = classifier
     self.analysis_results = analysis_results
     self.user = user
Code Example #17
    def update(self, analysis=Analysis()):
        """
        (Analysis) -> (Analysis)
        update analysis table
        """
        session = self.session_factory()
        analysisDB = session.query(AnalysisDB).filter_by(
            id=analysis.id).first()
        dic = {}
        if (analysisDB.idImage != analysis.image.id):
            dic['idImage'] = analysis.image.id
        if (analysisDB.idClassifier != analysis.classifier.id):
            dic['idClassifier'] = analysis.classifier.id
        if (dic != {}):
            session.query(AnalysisDB).filter_by(id=analysis.id).update(dic)
            session.commit()
            session.flush()
            session.refresh(analysisDB)

        return Analysis(
            analysisDB.id,
            image=Image(
                analysisDB.image.id,
                Disease(
                    analysisDB.image.disease.id,
                    Plant(analysisDB.image.disease.plant.id,
                          analysisDB.image.disease.plant.scientificName,
                          analysisDB.image.disease.plant.commonName),
                    analysisDB.image.disease.scientificName,
                    analysisDB.image.disease.commonName), analysisDB.image.url,
                analysisDB.image.description, analysisDB.image.source,
                analysisDB.image.size),
            classifier=Classifier(
                analysisDB.classifier.id,
                Plant(analysisDB.classifier.plant.id,
                      analysisDB.classifier.plant.scientificName,
                      analysisDB.classifier.plant.commonName),
                analysisDB.classifier.tag, analysisDB.classifier.path),
            user=User(id=analysisDB.user.id,
                      idType=analysisDB.user.idType,
                      email=analysisDB.user.email,
                      username=analysisDB.user.username))
Code Example #18
    def search(self, analysis=Analysis(), pageSize=10, offset=0):
        """
        (Analysis, pageSize, offset) -> {'total': int, 'content': [Analysis]}
        search by analysis
        """
        session = self.session_factory()
        query = session.query(AnalysisDB).filter(
            and_(AnalysisDB.idImage == analysis.image.id,
                 AnalysisDB.idClassifier == analysis.classifier.id))
        content = query.slice(offset, pageSize).all()
        total = query.count()
        analyses = []
        for analysisDB in content:
            analyses.append(
                Analysis(
                    analysisDB.id,
                    image=Image(
                        analysisDB.image.id,
                        Disease(
                            analysisDB.image.disease.id,
                            Plant(
                                analysisDB.image.disease.plant.id,
                                analysisDB.image.disease.plant.scientificName,
                                analysisDB.image.disease.plant.commonName),
                            analysisDB.image.disease.scientificName,
                            analysisDB.image.disease.commonName),
                        analysisDB.image.url, analysisDB.image.description,
                        analysisDB.image.source, analysisDB.image.size),
                    classifier=Classifier(
                        analysisDB.classifier.id,
                        Plant(analysisDB.classifier.plant.id,
                              analysisDB.classifier.plant.scientificName,
                              analysisDB.classifier.plant.commonName),
                        analysisDB.classifier.tag, analysisDB.classifier.path),
                    user=User(id=analysisDB.user.id,
                              idType=analysisDB.user.idType,
                              email=analysisDB.user.email,
                              username=analysisDB.user.username)))

        return {'total': total, 'content': analyses}
Code Example #19
    def update(self, analysisResult=AnalysisResult()):
        """
        (AnalysisResult) -> (AnalysisResult)
        update analysis_result table
        """
        session = self.session_factory()
        analysisResultDB = session.query(AnalysisResultDB).filter_by(
            id=analysisResult.id).first()
        dic = {}
        if (analysisResultDB.idAnalysis != analysisResult.analysis.id):
            dic['idAnalysis'] = analysisResult.analysis.id
        if (analysisResultDB.idDisease != analysisResult.disease.id):
            dic['idDisease'] = analysisResult.disease.id
        if (analysisResultDB.score != analysisResult.score):
            dic['score'] = analysisResult.score
        if (analysisResultDB.frame != analysisResult.frame):
            dic['frame'] = analysisResult.frame
        if (dic != {}):
            session.query(AnalysisResultDB).filter_by(
                id=analysisResult.id).update(dic)
            session.commit()
            session.flush()
            session.refresh(analysisResultDB)

        return AnalysisResult(
            analysisResultDB.id,
            Analysis(
                analysisResultDB.analysis.id,
                Image(
                    analysisResultDB.analysis.image.id,
                    Disease(
                        analysisResultDB.analysis.image.disease.id,
                        Plant(
                            analysisResultDB.analysis.image.disease.plant.id,
                            analysisResultDB.analysis.image.disease.plant.
                            scientificName, analysisResultDB.analysis.image.
                            disease.plant.commonName),
                        analysisResultDB.analysis.image.disease.scientificName,
                        analysisResultDB.analysis.image.disease.commonName),
                    analysisResultDB.analysis.image.url,
                    analysisResultDB.analysis.image.description,
                    analysisResultDB.analysis.image.source,
                    analysisResultDB.analysis.image.size),
                Classifier(
                    analysisResultDB.analysis.classifier.id,
                    Plant(
                        analysisResultDB.analysis.classifier.plant.id,
                        analysisResultDB.analysis.classifier.plant.
                        scientificName,
                        analysisResultDB.analysis.classifier.plant.commonName),
                    analysisResultDB.analysis.classifier.tag,
                    analysisResultDB.analysis.classifier.path),
                user=User(id=analysisResultDB.analysis.user.id,
                          idType=analysisResultDB.analysis.user.idType,
                          email=analysisResultDB.analysis.user.email,
                          username=analysisResultDB.analysis.user.username)),
            Disease(
                analysisResultDB.disease.id,
                Plant(analysisResultDB.disease.plant.id,
                      analysisResultDB.disease.plant.scientificName,
                      analysisResultDB.disease.plant.commonName),
                analysisResultDB.disease.scientificName,
                analysisResultDB.disease.commonName), analysisResultDB.score,
            analysisResultDB.frame)
Code Example #20
File: main.py Project: j-zarka/SparseScatNet
def main_worker(args):
    best_acc1 = 0
    best_acc5 = 0
    best_epoch_acc1 = 0
    best_epoch_acc5 = 0

    logs_dir = args.logdir
    if not os.path.exists(logs_dir):
        os.makedirs(logs_dir)

    checkpoint_savedir = args.savedir
    if not os.path.exists(checkpoint_savedir):
        os.makedirs(checkpoint_savedir)

    logfile = os.path.join(logs_dir, 'training_{}_b_{}_lrfreq_{}.log'.format(
        args.arch, args.batch_size, args.learning_rate_adjust_frequency))

    summaryfile = os.path.join(logs_dir, 'summary_file.txt')

    checkpoint_savefile = os.path.join(checkpoint_savedir, '{}_batchsize_{}_lrfreq_{}.pth.tar'.format(
        args.arch, args.batch_size, args.learning_rate_adjust_frequency))

    best_checkpoint_savefile = os.path.join(checkpoint_savedir, '{}_batchsize_{}_lrfreq_{}_best.pth.tar'.format(
                                                args.arch, args.batch_size, args.learning_rate_adjust_frequency))

    writer = SummaryWriter(logs_dir)

    # Data loading code
    ###########################################################################################
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))

    val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]))

    # can use a subset of all classes (specified in a file or randomly chosen)
    if args.nb_classes < 1000:
        train_indices = list(np.load('utils_sampling/imagenet_train_class_indices.npy'))
        val_indices = list(np.load('utils_sampling/imagenet_val_class_indices.npy'))
        classes_names = torch.load('utils_sampling/labels_dict')
        if args.class_indices is not None:
            class_indices = torch.load(args.class_indices)
        else:
            perm = torch.randperm(1000)
            class_indices = perm[:args.nb_classes].tolist()
        train_indices_full = [x for i in range(len(class_indices)) for x in range(train_indices[class_indices[i]],
                                                                                  train_indices[class_indices[i] + 1])]
        val_indices_full = [x for i in range(len(class_indices)) for x in range(val_indices[class_indices[i]],
                                                                                val_indices[class_indices[i] + 1])]
        classes_indices_file = os.path.join(logs_dir, 'classes_indices_selected')
        selected_classes_names = [classes_names[i] for i in class_indices]
        torch.save(class_indices, classes_indices_file)
        print_and_write('Selected {} classes indices:  {}'.format(args.nb_classes, class_indices), logfile,
                        summaryfile)
        print_and_write('Selected {} classes names:  {}'.format(args.nb_classes, selected_classes_names), logfile,
                        summaryfile)
        if args.random_seed is not None:
            print_and_write('Random seed used {}'.format(args.random_seed), logfile, summaryfile)

        train_dataset = torch.utils.data.Subset(train_dataset, train_indices_full)
        val_dataset = torch.utils.data.Subset(val_dataset, val_indices_full)

    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                               num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
                                             num_workers=args.workers, pin_memory=True)
    ###########################################################################################

    # Model creation
    ###########################################################################################
    if args.arch in model_names:

        n_space = 224
        nb_channels_in = 3

        # create scattering
        J = args.scattering_J
        L_ang = args.scat_angles

        max_order = 2 if args.scattering_order2 else 1

        if args.scattering_wph:
            A = args.scattering_nphases
            scattering = ScatteringTorch2D_wph(J=J, shape=(224, 224), L=L_ang, A=A, max_order=max_order,
                                               backend=args.backend)
        else:
            scattering = Scattering2D(J=J, shape=(224, 224), L=L_ang, max_order=max_order,
                                      backend=args.backend)
        # Flatten scattering
        scattering = nn.Sequential(scattering, nn.Flatten(1, 2))

        if args.scattering_wph:
            nb_channels_in += 3 * A * L_ang * J
        else:
            nb_channels_in += 3 * L_ang * J

        if max_order == 2:
            nb_channels_in += 3 * (L_ang ** 2) * J * (J - 1) // 2

        n_space = n_space // (2 ** J)
    ###########################################################################################

        # create linear proj
        # Standardization (can also be performed with BatchNorm2d(affine=False))
        if not os.path.exists('standardization'):
            os.makedirs('standardization')
        std_file = 'standardization/ImageNet2012_scattering_J{}_order{}_wph_{}_nphases_{}_nb_classes_{}.pth.tar'.format(
            args.scattering_J, 2 if args.scattering_order2 else 1, args.scattering_wph,
            args.scattering_nphases if args.scattering_wph else 0, args.nb_classes)

        if os.path.isfile(std_file):
            print_and_write("=> loading scattering mean and std '{}'".format(std_file), logfile)
            std_dict = torch.load(std_file)
            mean_std = std_dict['mean']
            stding_mat = std_dict['matrix']
        else:
            mean_std, stding_mat, std = compute_stding_matrix(train_loader, scattering, logfile)
            print_and_write("=> saving scattering mean and std '{}'".format(std_file), logfile)
            std_dict = {'mean': mean_std, 'std': std, 'matrix': stding_mat}
            torch.save(std_dict, std_file)

        standardization = Rescaling(mean_std, stding_mat)
        # standardization = nn.BatchNorm2d(nb_channels_in, affine=False)

        if args.arch in ['sparsescatnet', 'sparsescatnetw']:
            proj = nn.Conv2d(nb_channels_in, args.L_proj_size, kernel_size=args.L_kernel_size, stride=1,
                             padding=0, bias=False)
            nb_channels_in = args.L_proj_size
            linear_proj = LinearProj(standardization, proj, args.L_kernel_size)
        else:  # scatnet
            proj = nn.Identity()
            linear_proj = LinearProj(standardization, proj, 0)

        ###########################################################################################

        # Create ISTC (when applicable)
        if args.arch in ['sparsescatnet', 'sparsescatnetw']:
        ###########################################################################################
            if args.arch == 'sparsescatnet':
                arch_log = "=> creating model SparseScatNet with phase scattering {}, linear projection " \
                           "(projection dimension {}), ISTC with {} iterations, dictionary size {}, classifier {} " \
                           "pipeline".format(args.scattering_wph, args.L_proj_size, args.n_iterations,
                                             args.dictionary_size, args.classifier_type)

                istc = ISTC(nb_channels_in, dictionary_size=args.dictionary_size, n_iterations=args.n_iterations,
                            lambda_0=args.lambda_0, lambda_star=args.lambda_star, lambda_star_lb=args.lambda_star_lb,
                            grad_lambda_star=args.grad_lambda_star, epsilon_lambda_0=args.epsilon_lambda_0,
                            output_rec=args.output_rec)

            elif args.arch == 'sparsescatnetw':
                arch_log = "=> creating model SparseScatNetW with phase scattering {}, linear projection " \
                           "(projection dimension {}), ISTCW with {} iterations, dictionary size {}, classifier {} " \
                           "pipeline".format(args.scattering_wph, args.L_proj_size, args.n_iterations,
                                             args.dictionary_size, args.classifier_type)

                istc = ISTC(nb_channels_in, dictionary_size=args.dictionary_size, n_iterations=args.n_iterations,
                            lambda_0=args.lambda_0, lambda_star=args.lambda_star, lambda_star_lb=args.lambda_star_lb,
                            grad_lambda_star=args.grad_lambda_star, epsilon_lambda_0=args.epsilon_lambda_0,
                            output_rec=args.output_rec, use_W=True)

            if not args.output_rec:
                nb_channels_in = args.dictionary_size

        elif args.arch == 'scatnet':
            arch_log = "=> creating model ScatNet with phase scattering {} and classifier {}".\
                format(args.scattering_wph, args.classifier_type)

        # Create classifier
        ###########################################################################################

        classifier = Classifier(n_space, nb_channels_in, classifier_type=args.classifier_type,
                                nb_classes=1000, nb_hidden_units=args.nb_hidden_units, nb_l_mlp=args.nb_l_mlp,
                                dropout_p_mlp=args.dropout_p_mlp, avg_ker_size=args.avg_ker_size)

        # Create model
        ###########################################################################################
        if args.arch in ['sparsescatnet', 'sparsescatnetw']:
            model = SparseScatNet(scattering, linear_proj, istc, classifier, return_full_inf=True)  # print model info

        elif args.arch == 'scatnet':
            model = nn.Sequential(scattering, linear_proj, classifier)
    else:
        print_and_write("Unknown model", logfile, summaryfile)
        return

    print_and_write(arch_log, logfile, summaryfile)
    print_and_write('Number of epochs {}, learning rate decay epochs {}'.format(args.epochs,
                                                                                args.learning_rate_adjust_frequency),
                                                                                logfile, summaryfile)

    # DataParallel will divide and allocate batch_size to all available GPUs
    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print_and_write("=> loading checkpoint '{}'".format(args.resume), logfile, summaryfile)
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print_and_write("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), logfile,
                            summaryfile)
        else:
            print_and_write("=> no checkpoint found at '{}'".format(args.resume), logfile, summaryfile)

    cudnn.benchmark = True

    if args.arch in ['sparsescatnet', 'sparsescatnetw']:

        lambda_0_max = compute_lambda_0(train_loader, model).item()

        if args.l0_inf_init:
            with torch.no_grad():
                model.module.istc.lambda_0.data.fill_(lambda_0_max)
                model.module.istc.log_lambda_0.data = torch.log(model.module.istc.lambda_0.data)
                model.module.istc.gamma.data.fill_(np.power(args.lambda_star / lambda_0_max, 1. / args.n_iterations))
                model.module.istc.log_gamma.data = torch.log(model.module.istc.gamma.data)
                for i in range(args.n_iterations - 1):
                    model.module.istc.lambdas.data[i] = lambda_0_max * (model.module.istc.gamma.data ** (i + 1))
                    model.module.istc.log_lambdas.data[i] = torch.log(model.module.istc.lambdas.data[i])

        print_and_write('Lambda star lower bound {:.3f}'.format(args.lambda_star_lb), logfile, summaryfile)
        print_and_write('epsilon lambda_0 {}'.format(args.epsilon_lambda_0), logfile, summaryfile)

        with torch.no_grad():
            with np.printoptions(precision=2, suppress=True):
                lambda_0_init = model.module.istc.lambda_0.data.cpu().item()
                print_and_write('Lambda_0 init {:.2f}'.format(lambda_0_init), logfile, summaryfile)

                lambdas_init = model.module.istc.lambdas.data.cpu().numpy()
                print_and_write('Lambdas init {}'.format(lambdas_init), logfile, summaryfile)

                print_and_write('Lambda_star init {:.2f}'.format(args.lambda_star), logfile, summaryfile)

                gamma_init = model.module.istc.gamma.data.cpu().item()
                print_and_write('Gamma init {:.2f}'.format(gamma_init), logfile, summaryfile)

            count = 0
            for i in range(args.dictionary_size):
                if model.module.istc.dictionary_weight.data[:, i].norm(p=2) < 0.99 or \
                        model.module.istc.dictionary_weight.data[:, i].norm(p=2) > 1.01:
                    count += 1

            if count == 0:
                print_and_write("Dictionary atoms initially well normalized", logfile,summaryfile)
            else:
                print_and_write("{} dictionary atoms not initially well normalized".format(count), logfile, summaryfile)

            gram = torch.matmul(model.module.istc.w_weight.data[..., 0, 0].t(),
                                      model.module.istc.dictionary_weight.data[..., 0, 0]).cpu().numpy()

            if args.arch == 'sparsescatnetw':
                count = 0
                for i in range(args.dictionary_size):
                    if gram[i, i] < 0.99 or gram[i, i] > 1.01:
                        count += 1
                if count == 0:
                    print_and_write("W^T D diagonal elements well equal to 1", logfile, summaryfile)
                else:
                    print_and_write("{} W^T D diagonal elements not equal to 1".format(count),
                                    logfile, summaryfile)

            gram = np.abs(gram)
            for i in range(args.dictionary_size):
                gram[i, i] = 0

            print_and_write("Initial max coherence {:.3f}, median coherence {:.3f}".
                            format(np.max(gram), np.median(gram)), logfile, summaryfile)

    if args.evaluate:
        print_and_write("Evaluating model at epoch {}...".format(args.start_epoch), logfile)
        validate(val_loader, model, criterion, args.start_epoch, args, logfile, summaryfile, writer)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, logfile, writer)

        # evaluate on validation set
        acc1, acc5 = validate(val_loader, model, criterion, epoch, args, logfile, summaryfile, writer)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        if is_best:
            best_epoch_acc1 = epoch
        if acc5 > best_acc5:
            best_acc5 = acc5
            best_epoch_acc5 = epoch
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
            'optimizer': optimizer.state_dict(),
        }, is_best, checkpoint_filename=checkpoint_savefile, best_checkpoint_filename=best_checkpoint_savefile)

    if args.arch in ['sparsescatnet', 'sparsescatnetw']:
        with torch.no_grad():
            with np.printoptions(precision=2, suppress=True):

                lambda_0_final = model.module.istc.lambda_0.data.cpu().item()
                print_and_write('Lambda_0 final {:.2f}'.format(lambda_0_final), logfile, summaryfile)

                lambdas_final = model.module.istc.lambdas.data.cpu().numpy()
                print_and_write('Lambdas final {}'.format(lambdas_final), logfile, summaryfile)

                lambda_star_final = model.module.istc.lambda_star.data.cpu().item()
                print_and_write('Lambda_star final {:.2f}'.format(lambda_star_final), logfile, summaryfile)

                gamma_final = model.module.istc.gamma.data.cpu().item()
                print_and_write('Gamma final {:.2f}'.format(gamma_final), logfile, summaryfile)

            count = 0
            for i in range(args.dictionary_size):
                if model.module.istc.dictionary_weight.data[:, i].norm(p=2) < 0.99 or \
                        model.module.istc.dictionary_weight.data[:, i].norm(p=2) > 1.01:
                    count += 1

            if count == 0:
                print_and_write("Dictionary atoms finally well normalized", logfile, summaryfile)
            else:
                print_and_write("{} dictionary atoms not finally well normalized".format(count), logfile, summaryfile)

            gram = torch.matmul(model.module.istc.w_weight.data[..., 0, 0].t(),
                                model.module.istc.dictionary_weight.data[..., 0, 0]).cpu().numpy()

            if args.arch == 'sparsescatnetw':
                count = 0
                for i in range(args.dictionary_size):
                    if gram[i, i] < 0.99 or gram[i, i] > 1.01:
                        count += 1
                if count == 0:
                    print_and_write("W^T D diagonal elements well equal to 1", logfile, summaryfile)
                else:
                    print_and_write("{} W^T D diagonal elements not equal to 1".format(count),
                                    logfile, summaryfile)

            gram = np.abs(gram)
            for i in range(args.dictionary_size):
                gram[i, i] = 0

            print_and_write("Final max coherence {:.3f}, median coherence {:.3f}".
                            format(np.max(gram), np.median(gram)), logfile, summaryfile)

    print_and_write(
        "Best top 1 accuracy {:.2f} at epoch {}, best top 5 accuracy {:.2f} at epoch {}".format(best_acc1,
                                                                                                best_epoch_acc1,
                                                                                                best_acc5,
                                                                                                best_epoch_acc5),
        logfile, summaryfile)
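
The helper `adjust_learning_rate` used in the epoch loop above is not shown; a plausible implementation, assuming the standard ImageNet step-decay recipe (divide the learning rate by 10 every `learning_rate_adjust_frequency` epochs), would be:

    def adjust_learning_rate(optimizer, epoch, args):
        # step decay: lr = base_lr * 0.1^(epoch // frequency)   (assumed schedule)
        lr = args.lr * (0.1 ** (epoch // args.learning_rate_adjust_frequency))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr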
Code Example #21
    def search(self, analysisResult=AnalysisResult(), pageSize=10, offset=0):
        """
        (AnalysisResult, pageSize, offset) -> {'total': int, 'content': [AnalysisResult]}
        search by analysisResult
        """
        session = self.session_factory()
        query = session.query(AnalysisResultDB).filter(
            or_(AnalysisResultDB.idAnalysis == analysisResult.analysis.id,
                AnalysisResultDB.idDisease == analysisResult.disease.id,
                AnalysisResultDB.score == analysisResult.score,
                AnalysisResultDB.frame == analysisResult.frame))
        content = query.slice(offset, pageSize).all()
        total = query.count()
        analysisResults = []
        for analysisResultDB in content:
            analysisResults.append(
                AnalysisResult(
                    analysisResultDB.id,
                    Analysis(
                        analysisResultDB.analysis.id,
                        Image(
                            analysisResultDB.analysis.image.id,
                            Disease(
                                analysisResultDB.analysis.image.disease.id,
                                Plant(
                                    analysisResultDB.analysis.image.disease.
                                    plant.id, analysisResultDB.analysis.image.
                                    disease.plant.scientificName,
                                    analysisResultDB.analysis.image.disease.
                                    plant.commonName), analysisResultDB.
                                analysis.image.disease.scientificName,
                                analysisResultDB.analysis.image.disease.
                                commonName),
                            analysisResultDB.analysis.image.url,
                            analysisResultDB.analysis.image.description,
                            analysisResultDB.analysis.image.source,
                            analysisResultDB.analysis.image.size),
                        Classifier(
                            analysisResultDB.analysis.classifier.id,
                            Plant(
                                analysisResultDB.analysis.classifier.plant.id,
                                analysisResultDB.analysis.classifier.plant.
                                scientificName, analysisResultDB.analysis.
                                classifier.plant.commonName),
                            analysisResultDB.analysis.classifier.tag,
                            analysisResultDB.analysis.classifier.path),
                        user=User(
                            id=analysisResultDB.analysis.user.id,
                            idType=analysisResultDB.analysis.user.idType,
                            email=analysisResultDB.analysis.user.email,
                            username=analysisResultDB.analysis.user.username)),
                    Disease(
                        analysisResultDB.disease.id,
                        Plant(analysisResultDB.disease.plant.id,
                              analysisResultDB.disease.plant.scientificName,
                              analysisResultDB.disease.plant.commonName),
                        analysisResultDB.disease.scientificName,
                        analysisResultDB.disease.commonName),
                    analysisResultDB.score, analysisResultDB.frame))

        return {'total': total, 'content': analysisResults}
Code Example #22
class FullModel(pl.LightningModule):
    def __init__(self, model_name, vocab, lr, lr_decay, batch_size=64):
        """
        PyTorch Lightning module that creates the overall model.
        Inputs:
            model_name - String denoting what encoder class to use.  Either 'AWE', 'UniLSTM', 'BiLSTM', or 'BiLSTMMax'
            vocab - Vocabulary from alignment between SNLI dataset and GloVe vectors
            lr - Learning rate to use for the optimizer
            lr_decay - Learning rate decay factor to use each epoch
            batch_size - Size of the batches. Default is 64
        """
        super().__init__()
        self.save_hyperparameters()

        # create an embedding layer for the vocabulary embeddings
        self.glove_embeddings = nn.Embedding.from_pretrained(vocab.vectors)

        # check which encoder model to use
        if model_name == 'AWE':
            self.encoder = AWEEncoder()
            self.classifier = Classifier()
        elif model_name == 'UniLSTM':
            self.encoder = UniLSTM()
            self.classifier = Classifier(input_dim=4 * 2048)
        elif model_name == 'BiLSTM':
            self.encoder = BiLSTM()
            self.classifier = Classifier(input_dim=4 * 2 * 2048)
        else:
            self.encoder = BiLSTMMax()
            self.classifier = Classifier(input_dim=4 * 2 * 2048)

        # create the loss function
        self.loss_function = nn.CrossEntropyLoss()

        # create instance to save the last validation accuracy
        self.last_val_acc = None

    def forward(self, sentences):
        """
        The forward function calculates the loss for a given batch of sentences.
        Inputs:
            sentences - Batch of sentences with (premise, hypothesis, label) pairs
        Outputs:
            loss - Cross entropy loss of the predictions
            accuracy - Accuracy of the predictions
        """

        # get the sentence lengths of the batch
        lengths_premises = torch.tensor(
            [x[x != 1].shape[0] for x in sentences.premise],
            device=self.device)
        lengths_hypothesis = torch.tensor(
            [x[x != 1].shape[0] for x in sentences.hypothesis],
            device=self.device)

        # pass premises and hypothesis through the embeddings
        premises = self.glove_embeddings(sentences.premise)
        hypothesis = self.glove_embeddings(sentences.hypothesis)

        # forward the premises and hypothesis through the Encoder
        premises = self.encoder(premises, lengths_premises)
        hypothesis = self.encoder(hypothesis, lengths_hypothesis)

        # calculate the difference and multiplication
        difference = torch.abs(premises - hypothesis)
        multiplication = premises * hypothesis

        # create the sentence representations
        sentence_representations = torch.cat(
            [premises, hypothesis, difference, multiplication], dim=1)

        # pass through the classifier
        predictions = self.classifier(sentence_representations)

        # calculate the loss and accuracy
        loss = self.loss_function(predictions, sentences.label)
        predicted_labels = torch.argmax(predictions, dim=1)
        accuracy = torch.true_divide(
            torch.sum(predicted_labels == sentences.label),
            torch.tensor(sentences.label.shape[0],
                         device=sentences.label.device))

        # return the loss and accuracy
        return loss, accuracy

    # function that configures the optimizer for the model
    def configure_optimizers(self):
        # create optimizer
        optimizer = torch.optim.SGD([{
            'params': self.encoder.parameters()
        }, {
            'params': self.classifier.parameters()
        }],
                                    lr=self.hparams.lr)

        # freeze the embeddings
        self.glove_embeddings.weight.requires_grad = False

        # create learning rate decay
        lr_scheduler = {
            'scheduler':
            StepLR(optimizer=optimizer,
                   step_size=1,
                   gamma=self.hparams.lr_decay),
            'name':
            'learning_rate'
        }

        # return the scheduler and optimizer
        return [optimizer], [lr_scheduler]

    # function that performs a training step
    def training_step(self, batch, batch_idx):
        # forward the batch through the model
        train_loss, train_acc = self.forward(batch)

        # log the training loss and accuracy
        self.log("train_loss", train_loss, on_step=False, on_epoch=True)
        self.log("train_acc", train_acc, on_step=False, on_epoch=True)

        # return the training loss
        return train_loss

    # function that performs a validation step
    def validation_step(self, batch, batch_idx):
        # forward the batch through the model
        val_loss, val_acc = self.forward(batch)

        # log the validation loss and accuracy
        self.log("val_loss", val_loss)
        self.log("val_acc", val_acc)

        # save the validation accuracy
        self.last_val_acc = val_acc

    # function that performs a test step
    def test_step(self, batch, batch_idx):
        # forward the batch through the model
        test_loss, test_acc = self.forward(batch)

        # log the test loss and accuracy
        self.log("test_loss", test_loss)
        self.log("test_acc", test_acc)
Code Example #23
    def compile(self):

        if self.compiled:
            print('Model already compiled.')
            return
        self.compiled = True

        # Placeholders.
        self.X = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name='X')
        self.Y = tf.placeholder(tf.float32, shape=(None, 32, 32, 2), name='Y')
        self.labels = tf.placeholder(tf.float32, shape=(None, 10), name='labels')

        # Generator.
        generator = Generator(self.seed)

        # Discriminator.
        discriminator = Discriminator(self.seed)

        # Classifier.
        classifier = Classifier(self.seed)

        self.gen_out = generator.forward(self.X)
        disc_out_real = discriminator.forward(tf.concat([self.X, self.Y], 3))
        disc_out_fake = discriminator.forward(tf.concat([self.X, self.gen_out], 3), reuse_vars=True)

        # VAC-GAN classifier losses.
        classifier_real = classifier.forward(tf.concat([self.X, self.Y], 3))
        classifier_fake = classifier.forward(tf.concat([self.X, self.gen_out], 3), reuse_vars=True)
        classifier_l_real = tf.nn.softmax_cross_entropy_with_logits_v2(logits=classifier_real, labels=self.labels)
        classifier_l_fake = tf.nn.softmax_cross_entropy_with_logits_v2(logits=classifier_fake, labels=self.labels)
        self.classifier_loss_real = tf.reduce_mean(classifier_l_real)
        self.classifier_loss_fake = tf.reduce_mean(classifier_l_fake)
        self.classifier_loss = tf.reduce_mean(classifier_l_fake + classifier_l_real)

        # Generator loss.
        self.gen_loss_gan = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_out_fake, labels=tf.ones_like(disc_out_fake)))
        self.gen_loss_l1 = tf.reduce_mean(tf.abs(self.Y - self.gen_out)) * self.l1_weight
        self.gen_loss = self.gen_loss_gan + self.gen_loss_l1 + self.VAC_weight * self.classifier_loss

        # Discriminator losses.
        disc_l_fake = tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_out_fake, labels=tf.zeros_like(disc_out_fake))
        disc_l_real = tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_out_real, labels=tf.ones_like(disc_out_real)*self.label_smoothing)
        self.disc_loss_fake = tf.reduce_mean(disc_l_fake)
        self.disc_loss_real = tf.reduce_mean(disc_l_real)
        self.disc_loss = tf.reduce_mean(disc_l_fake + disc_l_real)

        # Global step.
        self.global_step = tf.Variable(0, name='global_step', trainable=False)

        # Learning rate.
        if self.learning_rate_decay:
            self.lr = tf.maximum(1e-6, tf.train.exponential_decay(
                learning_rate=self.learning_rate,
                global_step=self.global_step,
                decay_steps=self.learning_rate_decay_steps,
                decay_rate=self.learning_rate_decay_rate))
        else:
            self.lr = tf.constant(self.learning_rate)

        # Optimizers.
        self.gen_optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.gen_loss, var_list=generator.variables)
        self.disc_optimizer = tf.train.AdamOptimizer(learning_rate=self.lr/10).minimize(self.disc_loss, var_list=discriminator.variables)
        self.classifier_optimizer = tf.train.AdamOptimizer(learning_rate=self.lr/10).minimize(self.classifier_loss, var_list=classifier.variables, global_step=self.global_step)

        # Sampler.
        gen_sample = Generator(self.seed, is_training=False)
        self.sampler = gen_sample.forward(self.X, reuse_vars=True)

        self.MAE = tf.reduce_mean(tf.abs(self.Y - self.sampler))

        self.saver = tf.train.Saver()
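
A hedged sketch of how the graph compiled above is typically driven under TensorFlow 1.x; `gan`, `num_steps`, and the `next_batch` helper are assumptions standing in for the project's actual training loop:

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(num_steps):
            x, y, lbl = next_batch()                      # assumed data pipeline
            feed = {gan.X: x, gan.Y: y, gan.labels: lbl}
            sess.run(gan.disc_optimizer, feed_dict=feed)
            sess.run([gan.gen_optimizer, gan.classifier_optimizer], feed_dict=feed)
            if step % 100 == 0:
                g_loss, d_loss = sess.run([gan.gen_loss, gan.disc_loss], feed_dict=feed)
                print('step {}: gen {:.3f}, disc {:.3f}'.format(step, g_loss, d_loss))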