Example 1
def model_fn(model_dir):
    """
    Loads the PyTorch model from the `model_dir` directory.
    
    :param model_dir: model directory
    :return: model created
    """
    print("Loading model.")

    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)

    print("model_info: {}".format(model_info))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Regression(model_info['input_features'], model_info['hidden_dim1'],
                       model_info['hidden_dim2'], model_info['output_dim'])

    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))

    model.to(device).eval()

    print("Done loading model.")
    return model
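A minimal sketch of the save-side counterpart that produces the model_dir layout model_fn expects; the two-layer Regression stand-in, the /tmp/model_dir path, and the layer sizes below are assumptions for illustration, not the class used in the example.

# Sketch only: assumed stand-in Regression and sizes so the snippet is self-contained.
import os
import torch
import torch.nn as nn

class Regression(nn.Module):  # stand-in for illustration, not the original class
    def __init__(self, input_features, hidden_dim1, hidden_dim2, output_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_features, hidden_dim1), nn.ReLU(),
            nn.Linear(hidden_dim1, hidden_dim2), nn.ReLU(),
            nn.Linear(hidden_dim2, output_dim),
        )

    def forward(self, x):
        return self.net(x)

model_dir = '/tmp/model_dir'  # assumed location
os.makedirs(model_dir, exist_ok=True)

model_info = {'input_features': 13, 'hidden_dim1': 64,
              'hidden_dim2': 32, 'output_dim': 1}  # assumed sizes
torch.save(model_info, os.path.join(model_dir, 'model_info.pth'))

model = Regression(**model_info)
torch.save(model.state_dict(), os.path.join(model_dir, 'model.pth'))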
Example 2
    def __call__(self,
                 number_of_iterations=2,
                 learning_rate=0.005,
                 embedding_size=300):
        print("Starting 'Image Retrieval' in 'Regression' mode with '" +
              self.difficulty + "' data")

        self.model_full_path = "{}/{}_{}_{}_{}.pty".format(
            self.model_path, self.model_name, self.timestamp,
            learning_rate, embedding_size)
        self.output_file_name = "{}/{}_{}_{}_{}.csv".format(
            self.output_path, self.model_name, self.timestamp,
            learning_rate, embedding_size)

        self.number_of_iterations = number_of_iterations
        self.learning_rate = learning_rate
        self.embedding_size = embedding_size

        self.model = Regression(self.nwords, self.embedding_size,
                                self.image_feature_size,
                                self.output_vector_size)
        self.criterion = nn.MSELoss()

        self.evaluate = Evaluate(self.model, self.img_features, self.minibatch,
                                 self.preprocess, self.image_feature_size)
        print(self.model)

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.learning_rate)

        self.train_loss_values = []
        self.dev_loss_values = []
        self.test_loss_values = []

        self.magic()

        self.save_model()

        self.save_data()
Example 3
    def __init__(self, args,
                 trainRegressionDataLoader, trainRegressionClassificationLoader,
                 testDataLoader, trainRainFallLoader,
                 means, std):

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.trainRegressionDataLoader = trainRegressionDataLoader
        self.trainRegressionClassificationLoader = trainRegressionClassificationLoader
        self.testDataLoader = testDataLoader
        self.classificationLoader = trainRainFallLoader

        self.run_datetime = datetime.datetime.now()

        self.out_path = args.out
        self.sigma = args.sigma
        self.beta = args.beta
        self.earlyStop = args.earlyStop
        self.nClass = args.nClass

        self.noiseMean = torch.zeros(args.batch_size, args.featureNums, 17, 17)
        self.noiseStd = 1e-3

        self.model = AutoencoderBN(self.noiseMean, self.noiseStd).to(self.device)
        self.regressionModel = Regression(self.nClass).to(self.device)
        self.classificationModel = regressionClassification(self.nClass).to(self.device)

        self.rainFallClassifierModel = rainFallClassification().to(self.device)
        self.meanStdNormalizer = MeanVarianceNormalizer(means, std).to(self.device)

        self.meanvarLoss = MeanVarLoss(self.nClass).to(self.device)
        self.normaliedLoss = NormalizerLoss(std).to(self.device)
        self.focalLoss = FocalLoss(self.nClass, alpha=0.25, gamma=2).to(self.device)
        self.rainFocalLoss = FocalLoss(2, alpha=0.25, gamma=2).to(self.device)

        self.regressionOptim = torch.optim.Adam([
            {'params': self.regressionModel.parameters(), 'lr': args.lr,
             'weight_decay': args.weight_decay},
            {'params': self.model.parameters(), 'lr': args.lr,
             'weight_decay': args.weight_decay},
        ],
            lr=args.lr * 10, weight_decay=args.weight_decay * 10)
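        # Note: the per-group 'lr' and 'weight_decay' above override the 10x
        # defaults passed to Adam, which apply only to groups that omit them.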

        self.classificationOptim = torch.optim.Adam(self.classificationModel.parameters(), lr=args.lr * 100)

        self.rainFallOptim = torch.optim.Adam(self.rainFallClassifierModel.parameters(), lr=args.lr * 10)

        # self.reconstructOptim = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.regressionOptim, step_size=750 * 2)

        self.criterion = nn.MSELoss()

        self.classificationCriterion = nn.CrossEntropyLoss()

        if not os.path.exists(self.out_path):
            os.makedirs(self.out_path)

        self.logger = Logger(self.out_path)

        with open(os.path.join(self.out_path, "para.json"), "w") as f:
            json.dump(args.__dict__, f)

        self.epoch = 0
        self.iteration = 0
        self.classificationIteration = 0
        self.rainfallclassificationIteration = 0
        self.test_step = 0
        self.max_epoch = args.epochs
        self.val_interval = args.interval
        self.res = 0
        self.bestConstructLoss = 1e7
        self.bestConstructEpoch = 0
        self.best_error = 1e7
        self.best_res_epoch = 0
Example 4
class AutoEncoderTrainer(object):
    def __init__(self, args,
                 trainRegressionDataLoader, trainRegressionClassificationLoader,
                 testDataLoader, trainRainFallLoader,
                 means, std):

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.trainRegressionDataLoader = trainRegressionDataLoader
        self.trainRegressionClassificationLoader = trainRegressionClassificationLoader
        self.testDataLoader = testDataLoader
        self.classificationLoader = trainRainFallLoader

        self.run_datetime = datetime.datetime.now()

        self.out_path = args.out
        self.sigma = args.sigma
        self.beta = args.beta
        self.earlyStop = args.earlyStop
        self.nClass = args.nClass

        self.noiseMean = torch.zeros(args.batch_size, args.featureNums, 17, 17)
        self.noiseStd = 1e-3

        self.model = AutoencoderBN(self.noiseMean, self.noiseStd).to(self.device)
        self.regressionModel = Regression(self.nClass).to(self.device)
        self.classificationModel = regressionClassification(self.nClass).to(self.device)

        self.rainFallClassifierModel = rainFallClassification().to(self.device)
        self.meanStdNormalizer = MeanVarianceNormalizer(means, std).to(self.device)

        self.meanvarLoss = MeanVarLoss(self.nClass).to(self.device)
        self.normaliedLoss = NormalizerLoss(std).to(self.device)
        self.focalLoss = FocalLoss(self.nClass, alpha=0.25, gamma=2).to(self.device)
        self.rainFocalLoss = FocalLoss(2, alpha=0.25, gamma=2).to(self.device)

        self.regressionOptim = torch.optim.Adam([
            {'params': self.regressionModel.parameters(), 'lr': args.lr,
             'weight_decay': args.weight_decay},
            {'params': self.model.parameters(), 'lr': args.lr,
             'weight_decay': args.weight_decay},
        ],
            lr=args.lr * 10, weight_decay=args.weight_decay * 10)

        self.classificationOptim = torch.optim.Adam(self.classificationModel.parameters(), lr=args.lr * 100)

        self.rainFallOptim = torch.optim.Adam(self.rainFallClassifierModel.parameters(), lr=args.lr * 10)

        # self.reconstructOptim = torch.optim.Adam(self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.regressionOptim, step_size=750 * 2)

        self.criterion = nn.MSELoss()

        self.classificationCriterion = nn.CrossEntropyLoss()

        if not os.path.exists(self.out_path):
            os.makedirs(self.out_path)

        self.logger = Logger(self.out_path)

        with open(os.path.join(self.out_path, "para.json"), "w") as f:
            json.dump(args.__dict__, f)

        self.epoch = 0
        self.iteration = 0
        self.classificationIteration = 0
        self.rainfallclassificationIteration = 0
        self.test_step = 0
        self.max_epoch = args.epochs
        self.val_interval = args.interval
        self.res = 0
        self.bestConstructLoss = 1e7
        self.bestConstructEpoch = 0
        self.best_error = 1e7
        self.best_res_epoch = 0

    def mask_norm(self, mask, threshold=0.5):

        mask_ = mask * (mask > threshold).float()
        mask_ = mask_ / mask_.sum(1).unsqueeze(-1)
        return mask_
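    # e.g. mask_norm(torch.tensor([[0.2, 0.6, 0.8]]), threshold=0.5) zeroes the
    # entries at or below the threshold and renormalizes the rest:
    # tensor([[0.0000, 0.4286, 0.5714]]) (approximately).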

    def generateOneHot(self, softmax):
        maxIdxs = torch.argmax(softmax, dim=1, keepdim=True).cpu().long()
        oneHotMask = torch.zeros(softmax.shape, dtype=torch.float32)
        oneHotMask = oneHotMask.scatter_(1, maxIdxs, 1.0)
        oneHotMask = oneHotMask.unsqueeze(-2)
        return oneHotMask
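    # Sketch with assumed toy values of what generateOneHot produces:
    #   probs = torch.tensor([[0.1, 0.7, 0.2],
    #                         [0.6, 0.3, 0.1]])
    #   self.generateOneHot(probs)
    #   -> tensor([[[0., 1., 0.]],
    #              [[1., 0., 0.]]])   # shape (batch, 1, nClass), ready for matmul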

    def validate_one_epoch(self):
        self.model.eval()
        self.regressionModel.eval()
        self.classificationModel.eval()
        self.rainFallClassifierModel.eval()
        self.test_step += 1

        tsthreas = [0.1, 1, 10]

        tp = [0] * len(tsthreas)  # true positive
        tn = [0] * len(tsthreas)  # true negative
        fp = [0] * len(tsthreas)  # false positive
        fn = [0] * len(tsthreas)  # false negative
        ts = [0] * len(tsthreas)
        totalRegressionLoss = []
        totalReconstructLoss = []
        totalClassificationLoss = []
        totalRClassificationLoss = []
        total_error = 0
        total_count = 0
        p_error = 0
        ps_error = 0
        p_count = 0

        pxErrorList = [0] * (self.nClass)
        pxsErrorList = [0] * (self.nClass)
        pxCountList = [0] * (self.nClass)
        pxAverageError = [0] * (self.nClass)
        pxsAverageError = [0] * (self.nClass)

        classCorrect = [0] * (self.nClass)
        classCounnt = [0] * (self.nClass)
        accuray = [0] * (self.nClass + 1)

        rainCorrect = [0] * 2
        rainCount = [0] * 2
        rainAccuracy = [0] * 3

        for batch_idx, (data, target, rainClass, rainMask, regressionClass, regressionMask) in tqdm.tqdm(
                enumerate(self.testDataLoader), total=len(self.testDataLoader),
                desc='Test Data:', ncols=80,
                leave=False):

            rainNumpy = rainClass.numpy()
            regressionNumpy = regressionClass.numpy()
            one_hot_mask = regressionMask.numpy()
            gt_micaps = target.numpy()

            data = data.to(device=self.device)
            target = target.to(device=self.device)
            rainClass = rainClass.to(device=self.device)
            # rainMask = rainMask.to(device=self.device)
            regressionClass = regressionClass.to(device=self.device)
            regressionMask = regressionMask.to(device=self.device).unsqueeze(-2)

            with torch.no_grad():
                encoder, decoder = self.model(data)
                predictValues = self.regressionModel(encoder)
                rainPreds = self.rainFallClassifierModel(data)
                regressionPreds = self.classificationModel(data)

                rainPredsSoftMax = F.softmax(rainPreds, dim=1)
                regressionPredsSoftmax = F.softmax(regressionPreds, dim=1)

                # if the predicted class is the last class, the output is set to zero

                rainOneHotMask = self.generateOneHot(rainPredsSoftMax).to(self.device)
                regressionOneHotMask = self.generateOneHot(regressionPredsSoftmax).to(self.device)

                predictValues = self.meanStdNormalizer(predictValues).unsqueeze(-1)
                # print(predictValues[0])

                regressionValues = torch.matmul(regressionOneHotMask, predictValues).squeeze(-1)
                # print(regressionValues[0])
                zeros = torch.zeros(regressionValues.size()).to(self.device)

                regressionValues = torch.matmul(rainOneHotMask,
                                                torch.cat([zeros, regressionValues], dim=1).unsqueeze(-1)).squeeze(-1)

                # print("res: ",regressionValues[:10])
                # print("resSum: ",regressionValues.mean())
                # print("target: ",target[:10])

                # print(regressionValues[0])
                # print(target[0])

                # Three losses: reconstruction loss, regression loss and classification loss
                regressionLoss = self.criterion(regressionValues, target)
                reconstructLoss = self.criterion(decoder, data)
                rainClassificationLoss = self.classificationCriterion(rainPreds, rainClass)
                regressionClassificationLoss = self.classificationCriterion(regressionPreds, regressionClass)

                rainPredicted = torch.argmax(rainPredsSoftMax, dim=1).cpu().numpy()
                predicted = torch.argmax(regressionPredsSoftmax, dim=1).cpu().numpy()

                for i in range(self.nClass):
                    classCorrect[i] += np.sum((predicted == i) * (regressionNumpy == i) * (rainNumpy == 1))
                    classCounnt[i] += np.sum((regressionNumpy == i) * (rainNumpy == 1))

                for i in range(2):
                    rainCorrect[i] += np.sum((rainPredicted == i) * (rainNumpy == i))
                    rainCount[i] += np.sum(rainNumpy == i)

                predictNumpy = regressionValues.cpu().numpy()
                # biasNumpy = resValues.cpu().numpy()
                # labelsIndex = predicted.cpu().numpy()
                # predictNumpy = np.array([[biasNumpy[i,0]*(idx<self.nClass) + self.center[idx]] for i,idx in enumerate(labelsIndex)])

                totalRegressionLoss.append(regressionLoss.item())
                totalReconstructLoss.append(reconstructLoss.item())
                totalClassificationLoss.append(regressionClassificationLoss.item())
                totalRClassificationLoss.append(rainClassificationLoss.item())

                gapValues = np.abs(predictNumpy - gt_micaps)

                total_error += np.sum(gapValues)
                total_count += gapValues.shape[0]
                # print(gt_micaps[:10])
                # print(one_hot_mask[:10])
                p_ae = (gt_micaps > 0.05) * gapValues
                p_error += np.sum(p_ae)
                ps_error += np.sum(p_ae ** 2)
                p_count += np.sum(gt_micaps > 0.05)

                for i in range(self.nClass):
                    ae = one_hot_mask[:, i].reshape(-1, 1) * gapValues
                    pxErrorList[i] += np.sum(ae)
                    pxsErrorList[i] += np.sum(ae ** 2)
                    pxCountList[i] += np.sum(one_hot_mask[:, i])

                for i, threas in enumerate(tsthreas):
                    tp[i] += np.sum((gt_micaps >= threas) * (predictNumpy >= threas))
                    tn[i] += np.sum((gt_micaps < threas) * (predictNumpy < threas))
                    fp[i] += np.sum((gt_micaps < threas) * (predictNumpy >= threas))
                    fn[i] += np.sum((gt_micaps >= threas) * (predictNumpy < threas))

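        # Threat score (critical success index): hits / (hits + false alarms + misses).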
        for i, _ in enumerate(tsthreas):
            ts[i] += round(tp[i] / (tp[i] + fp[i] + fn[i]), 5)

        totalAverageError = round(total_error / total_count, 5)
        pAverageError = round(p_error / p_count, 5)
        psAverageError = round(ps_error / p_count - pAverageError ** 2, 5)

        for i in range(self.nClass):
            pxAverageError[i] += round(pxErrorList[i] / pxCountList[i], 5)
            pxsAverageError[i] += round(pxsErrorList[i] / pxCountList[i] - pxAverageError[i] ** 2, 5)

        totalLoss = np.mean(totalRegressionLoss)
        totalRLoss = np.mean(totalReconstructLoss)
        totalCLoss = np.mean(totalClassificationLoss)

        for i in range(self.nClass):
            accuray[i] += round(classCorrect[i] / classCounnt[i], 5)
        accuray[self.nClass] += round(sum(classCorrect) / sum(classCounnt), 5)

        for i in range(2):
            rainAccuracy[i] += round(rainCorrect[i] / rainCount[i], 5)
        rainAccuracy[2] += round(sum(rainCorrect) / sum(rainCount), 5)

        info = {"test_regression_loss": totalLoss,
                "test_reconstruct_loss": totalRLoss,
                "test_classification_loss": totalCLoss,
                "aver_gap": totalAverageError,
                "aver_p_gap": pAverageError,
                "aver_ps_gap": psAverageError,
                "p_num": p_count,
                }

        tsDisplay = list(zip(tp, tn, fp, fn, ts))

        classStatistics = {
            "average_p_gap": pxAverageError,
            "aver_p_s_gap": pxsAverageError,
            "p_count": pxCountList,
            "ts_score": tsDisplay,
            "test_rain_classification_accuracy": rainAccuracy,
            "test_classification_accuracy": accuray,
        }

        print(info)
        print(classStatistics)

        if totalAverageError < self.best_error:
            self.best_error = totalAverageError
            self.best_res_epoch = self.epoch
            info["epoch"] = self.epoch
            info["modelParam"] = self.model.state_dict()
            info["regressionParam"] = self.regressionModel.state_dict()
            info["optimParam"] = self.regressionOptim.state_dict()
            torch.save(info, os.path.join(self.out_path, str(self.epoch) + "_checkpoints.pth"))

    def train_one_epoch_for_rainFall(self):
        classCorrect = [0] * 2
        classCounnt = [0] * 2
        accuray = [0] * 3
        self.rainFallClassifierModel.train()

        for batch_idx, (data, target, rainClass, rainMask, regressionClass, regressionMask) in tqdm.tqdm(
                enumerate(self.classificationLoader), total=len(self.classificationLoader),
                desc='Train RainFall Classification epoch=%d' % self.epoch, ncols=100, leave=False):
            iter_idx = batch_idx + self.epoch * len(self.classificationLoader)
            self.rainfallclassificationIteration = iter_idx

            assert self.rainFallClassifierModel.training
            self.rainFallOptim.zero_grad()

            logitNumpy = rainClass.numpy()

            data = data.to(device=self.device)
            logitsFloat = rainClass.float().to(device=self.device)
            logits = rainClass.to(device=self.device)

            preds = self.rainFallClassifierModel(data)
            predsSoftmax = F.softmax(preds, dim=1)

            classificationLoss = self.rainFocalLoss(preds, logits)
            # classificationLoss = self.classificationCriterion(preds, logits)

            classificationLoss.backward()

            self.rainFallOptim.step()

            classificationLossCpu = classificationLoss.item()

            predicted = torch.argmax(predsSoftmax, dim=1).cpu().numpy()

            for i in range(2):
                classCorrect[i] += np.sum((predicted == i) * (logitNumpy == i))
                classCounnt[i] += np.sum(logitNumpy == i)

            self.logger.scalar_summary("train_rainfall_classification_loss", classificationLossCpu,
                                       self.rainfallclassificationIteration + 1)
        for i in range(2):
            accuray[i] += round(classCorrect[i] / classCounnt[i], 5)
        accuray[2] += round(sum(classCorrect) / sum(classCounnt), 5)

        print("Train Rain Fall Classification Accuracy : ", accuray)

    def train_one_epoch_for_classification(self):
        classCorrect = [0] * self.nClass
        classCounnt = [0] * self.nClass
        accuray = [0] * (self.nClass + 1)
        self.classificationModel.train()
        for batch_idx, (data, target, rainClass, rainMask, regressionClass, regressionMask) in tqdm.tqdm(
                enumerate(self.trainRegressionClassificationLoader),
                total=len(self.trainRegressionClassificationLoader),
                desc='Train Classification epoch=%d' % self.epoch, ncols=100, leave=False):
            iter_idx = batch_idx + self.epoch * len(self.trainRegressionClassificationLoader)
            self.classificationIteration = iter_idx

            assert self.classificationModel.training
            self.classificationOptim.zero_grad()

            logitNumpy = regressionClass.numpy()

            data = data.to(device=self.device)
            logitsFloat = regressionClass.float().to(device=self.device)
            logits = regressionClass.to(device=self.device)

            preds = self.classificationModel(data)
            predsSoftmax = F.softmax(preds, dim=1)

            classificationLoss = self.focalLoss(preds, logits)
            # classificationLoss = self.classificationCriterion(preds, logits)
            meanLoss, varLoss = self.meanvarLoss(predsSoftmax, logitsFloat.unsqueeze(-1))

            loss = classificationLoss + 1 * meanLoss + 0.5 * varLoss
            loss.backward()

            self.classificationOptim.step()

            classificationLossCpu = loss.item()

            predicted = torch.argmax(predsSoftmax, dim=1).cpu().numpy()

            for i in range(self.nClass):
                classCorrect[i] += np.sum((predicted == i) * (logitNumpy == i))
                classCounnt[i] += np.sum(logitNumpy == i)

            self.logger.scalar_summary("train_classification_loss", classificationLossCpu,
                                       self.classificationIteration + 1)

        for i in range(self.nClass):
            accuray[i] += round(classCorrect[i] / classCounnt[i], 5)
        accuray[self.nClass] += round(sum(classCorrect) / sum(classCounnt), 5)

        print("Train classification Accuracy : ", accuray)

    def train_one_epoch_for_regression(self):
        self.model.train()
        self.regressionModel.train()

        for batch_idx, (data, target, rainClass, rainMask, regressionClass, regressionMask) in tqdm.tqdm(
                enumerate(self.trainRegressionDataLoader), total=len(self.trainRegressionDataLoader),
                desc='Train Regression epoch=%d' % self.epoch, ncols=100, leave=False):
            iter_idx = batch_idx + self.epoch * len(self.trainRegressionDataLoader)
            # if (self.iteration != 0) and (iter_idx - 1) != self.iteration:
            #     continue
            self.iteration = iter_idx

            assert self.regressionModel.training
            self.regressionOptim.zero_grad()

            # noise = torch.randn(data.size()).to(device=self.device)
            noise = torch.normal(mean=self.noiseMean, std=self.noiseStd).to(self.device)

            # noise = torch.normal(mean=self.noiseMean, std=self.noiseStd).to(device=self.device)
            data = data.to(device=self.device)

            rainMask = rainMask.to(device=self.device)
            regressionMask = regressionMask.to(device=self.device)

            noisedData = data + noise
            target = target.to(device=self.device)

            encoder, decoder = self.model(noisedData)
            predictValues = self.regressionModel(encoder)

            # thresholdMask = labels.narrow(1, 0, self.nClass).view(-1, 1, self.nClass)
            predictValues = self.meanStdNormalizer(predictValues)
            # resValues = torch.matmul(thresholdMask, predictValues).squeeze(-1)

            regressionLoss = self.normaliedLoss(predictValues, target, rainMask, regressionMask)
            # regressionLoss = self.criterion(resValues, target)
            constructLoss = self.criterion(decoder, data)
            # classificationLoss = self.classificationCriterion(preds, logits)
            # meanLoss, varLoss = self.meanvarLoss(predictClassesSoftmax, logitsFloat.unsqueeze(-1))

            loss = constructLoss + self.sigma * regressionLoss
            # loss = constructLoss + self.sigma* regressionLoss
            loss.backward()
            # for param in self.model.parameters():
            #     print(param.grad.data.sum())
            self.regressionOptim.step()

            constructLossCpu = constructLoss.item()
            regressionLossCpu = regressionLoss.item()
            self.logger.scalar_summary("train_construct_loss", constructLossCpu, self.iteration + 1)
            self.logger.scalar_summary("train_regression_loss", regressionLossCpu, self.iteration + 1)

    def run(self):
        for epoch in tqdm.trange(self.epoch, self.max_epoch,
                                 desc='Experiments ', ncols=100):
            self.epoch = epoch
            self.train_one_epoch_for_rainFall()
            self.train_one_epoch_for_classification()
            if self.epoch % self.val_interval == 0:
                self.validate_one_epoch()
            self.train_one_epoch_for_regression()
Example 5
class Train():
    def __init__(self, difficulty):
        self.data_path = "../data"
        self.model_path = "../models"
        self.output_path = "../outputs"
        self.difficulty = difficulty
        self.timestamp = str(int(time.time()))
        self.model_name = "regression_" + self.difficulty
        self.data = Data(difficulty=self.difficulty, data_path=self.data_path)
        (self.img_features, self.w2i, self.i2w, self.nwords, self.UNK,
         self.PAD) = self.data()
        self.train = list(self.data.get_train_data())
        self.dev = list(self.data.get_validation_data())
        self.test = list(self.data.get_test_data())
        self.image_feature_size = 2048
        self.output_vector_size = 10

    def __call__(self,
                 number_of_iterations=2,
                 learning_rate=0.005,
                 embedding_size=300):
        print("Starting 'Image Retrieval' in 'Regression' mode with '" +
              self.difficulty + "' data")

        self.model_full_path = "{}/{}_{}_{}_{}.pty".format(
            self.model_path, self.model_name, self.timestamp,
            learning_rate, embedding_size)
        self.output_file_name = "{}/{}_{}_{}_{}.csv".format(
            self.output_path, self.model_name, self.timestamp,
            learning_rate, embedding_size)

        self.number_of_iterations = number_of_iterations
        self.learning_rate = learning_rate
        self.embedding_size = embedding_size

        self.model = Regression(self.nwords, self.embedding_size,
                                self.image_feature_size,
                                self.output_vector_size)
        self.criterion = nn.MSELoss()

        self.evaluate = Evaluate(self.model, self.img_features, self.minibatch,
                                 self.preprocess, self.image_feature_size)
        print(self.model)

        self.optimizer = optim.Adam(self.model.parameters(),
                                    lr=self.learning_rate)

        self.train_loss_values = []
        self.dev_loss_values = []
        self.test_loss_values = []

        self.magic()

        self.save_model()

        self.save_data()

    def minibatch(self, data, batch_size=50):
        for i in range(0, len(data), batch_size):
            yield data[i:i + batch_size]
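    # e.g. self.minibatch(list(range(5)), batch_size=2) yields
    # [0, 1], then [2, 3], then [4].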

    def preprocess(self, batch):
        """Helper function for functional batches"""
        correct_indexes = [observation[2] for observation in batch]
        img_ids = [observation[1] for observation in batch]
        text_features = [observation[0] for observation in batch]

        #Add Padding to max len of sentence in batch
        max_length = max(map(len, text_features))
        text_features = [
            txt + [self.PAD] * (max_length - len(txt)) for txt in text_features
        ]

        #return in "stacked" format
        return text_features, img_ids, correct_indexes

    def magic(self):
        for ITER in range(self.number_of_iterations):

            random.shuffle(self.train)
            train_loss = 0.0
            start = time.time()

            for iteration, batch in enumerate(self.minibatch(self.train)):
                #Outputs matrices of batch size
                text_features, h5_ids, correct_index = self.preprocess(batch)
                lookup_text_tensor = Variable(torch.LongTensor([text_features
                                                                ])).squeeze()

                target = np.empty([len(batch), self.image_feature_size])
                for obs, img_ids in enumerate(h5_ids):
                    target[obs] = self.img_features[img_ids[
                        correct_index[obs]]]

                target = Variable(
                    torch.from_numpy(target).type(torch.FloatTensor))

                #Run model and calculate loss
                prediction = self.model(lookup_text_tensor)
                loss = self.criterion(prediction, target)
                train_loss += loss.data[0]

                self.optimizer.zero_grad()
                self.model.zero_grad()
                loss.backward()
                self.optimizer.step()

                #if iteration % verbosity_interval == 0:
                #    print("ITERATION %r: %r: train loss/sent=%.4f, time=%.2fs" % (ITER+1, iteration, train_loss/(iteration + 1), time.time() - start))

            print(
                "ITERATION %r: train loss/sent=%.4f, time=%.2fs" %
                (ITER + 1, train_loss / len(self.train), time.time() - start))
            #print("Score on training", evaluate(train))
            #print("Score on development", evaluate(dev))
            self.train_loss_values.append(train_loss / len(self.train))
            self.dev_loss_values.append(self.evaluate.calculate_loss(self.dev))
            self.test_loss_values.append(
                self.evaluate.calculate_loss(self.test))

    def save_model(self):
        #Save model
        torch.save(self.model, self.model_full_path)
        print("Saved model has test score", self.evaluate(self.test))

    def plot(self):
        plt.plot(self.train_loss_values, label="Train loss")
        plt.plot(self.dev_loss_values, label="Validation loss")
        plt.plot(self.test_loss_values, label="Test loss")
        plt.legend(loc='best')
        plt.xlabel("Epochs")
        plt.ylabel("Loss")
        plt.title(self.model_name +
                  " - has loss with lr = %.4f, embedding size = %r" %
                  (self.learning_rate, self.embedding_size))
        plt.show()

    def save_data(self):
        with open(self.output_file_name, "w") as f:
            f.write(", ".join(map(str, self.train_loss_values)))
            f.write("\n")
            f.write(", ".join(map(str, self.dev_loss_values)))
            f.write("\n")
            f.write(", ".join(map(str, self.test_loss_values)))
            f.write("\n")
            f.write(str(self.evaluate(self.dev)))
            f.write("\n")
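A hypothetical driver for the Train class above; the "easy" difficulty label, and the ../data, ../models and ../outputs directories (plus the Data, Evaluate and Regression imports the class relies on), are assumptions about the surrounding project, not part of the example.

if __name__ == "__main__":
    trainer = Train(difficulty="easy")   # assumed difficulty label
    trainer(number_of_iterations=2,      # defaults shown in __call__ above
            learning_rate=0.005,
            embedding_size=300)
    trainer.plot()                       # plot the train/dev/test loss curves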
Example 6
def regression():
    # *********************    load the dataset and divide to X&y   ***********************
    from sklearn.datasets import make_blobs
    X, Y = make_blobs(cluster_std=0.9,
                      random_state=20,
                      n_samples=1000,
                      centers=10,
                      n_features=10)

    from Algorithms.ML_.helper.data_helper import split_train_val_test
    X, Xv, y, Yv, Xt, Yt = split_train_val_test(X, Y)
    print(X.shape, y.shape, Xv.shape, Yv.shape, Xt.shape, Yt.shape)

    # *********************   build model    ***********************
    from model import Regression
    from layer import Layer, Dense
    from activation import Activation, Softmax, Sigmoid, ReLU
    from regularization import Regularization, L1, L2, L12
    from optimizer import Vanilla
    model = Regression()
    input_size = X.shape[1]
    hidden_size = 50
    num_classes = 10
    learning_rate, reg_rate = 1e-3, 0.5
    model = Regression([
        Dense(hidden_size,
              input_shape=(input_size, ),
              activation=ReLU(),
              alpha=learning_rate,
              lambda_=reg_rate),
    ])
    model += Dense(num_classes,
                   activation=Softmax(),
                   alpha=learning_rate,
                   lambda_=reg_rate)  # add layer with +=
    model.compile()
    model.describe()
    # *********************    train   ***********************
    loss_train, loss_val = model.train(X,
                                       y,
                                       val=(Xv, Yv),
                                       iter_=5000,
                                       batch=32,
                                       return_loss=True,
                                       verbose=True)

    import matplotlib.pyplot as plt
    plt.plot(range(len(loss_train)), loss_train)
    plt.plot(range(len(loss_val)), loss_val)
    plt.legend(['train', 'val'])
    plt.xlabel('Iteration')
    plt.ylabel('Training loss')
    plt.title('Training Loss history')
    plt.show()
    # *********************    predict   ***********************
    pred_train = model.predict(X)
    pred_val = model.predict(Xv)
    pred_test = model.predict(Xt)

    import metrics
    print('train accuracy=', metrics.accuracy(y, pred_train))
    print('val accuracy=', metrics.accuracy(Yv, pred_val))
    print('test accuracy=', metrics.accuracy(Yt, pred_test))
    print('null accuracy=', metrics.null_accuracy(y))
    metrics.print_metrics(Yt, pred_test)
Example 7
def training():

    # Load data.
    print('Loading data...')
    try:
        with gfile.Open(MODEL_DIR + '/data', 'rb') as f:
            x_data, y_data = pickle.loads(f.read())
        print('  Old data found in {}.'.format(MODEL_DIR + '/data'))
    except:
        print('  Creation of a new set of data.')
        x_data, y_data = zip(*du.load_labels_data(DATA_DIRECTORY))
        with gfile.Open(MODEL_DIR + '/data', 'wb') as f:
            f.write(pickle.dumps((x_data, y_data)))

    # Load and save vocabulary.
    print('Loading vocabulary...')
    try:
        vocab_processor = learn.preprocessing.VocabularyProcessor.restore(
            MODEL_DIR + '/vocab')
        print("  Old vocabulary found in {}.".format(MODEL_DIR + '/vocab'))
    except:
        print("  Creation of a new vocabulary.")
        max_document_length = max([len(x.split(" ")) for x in y_data])
        vocab_processor = learn.preprocessing.VocabularyProcessor(
            max_document_length)
        vocab_processor.fit(y_data)
    vocab_processor_x = learn.preprocessing.VocabularyProcessor(
        4, vocabulary=vocab_processor.vocabulary_)
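    # vocab_processor_x reuses the same vocabulary but fixes the sequence length
    # to the 4 input words, while vocab_processor keeps the full target length.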
    vocab_processor.save(MODEL_DIR + '/vocab')
    print("  Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))

    # Write the correspondence from word ID to word.
    with open(MODEL_DIR + '/correspondance.tsv', 'w') as f:
        f.write('Word ID\tWord\n')
        for word, word_id in vocab_processor.vocabulary_._mapping.items():
            f.write('{}\t{}\n'.format(str(word_id), word))

    with tf.Graph().as_default() as graph:
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)

        # Create model.
        print('Creating model...')
        model = Regression(number_of_words=len(x_data[0]),
                           sequence_length=LENGTH_MAX,
                           vocab_size=len(vocab_processor.vocabulary_),
                           embedding_size=EMBEDDING_SIZE)

        # Define Training procedure.
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
        grads_and_vars = optimizer.compute_gradients(model.loss)
        train_op = optimizer.apply_gradients(grads_and_vars,
                                             global_step=global_step)

        # Checkpoint directory.
        checkpoint_path = MODEL_DIR + "/checkpoint.ckpt"
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=3)

    with tf.Session(graph=graph) as sess:

        # Initialize.
        print('Initializing...')
        sess.run(tf.global_variables_initializer())

        # Maybe restore model parameters.
        ckpt = tf.train.get_checkpoint_state(MODEL_DIR)
        if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + '.index'):
            print("Restoring model parameters from %s." %
                  ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("Fresh parameters for this model.")

        # Tensorboard.
        dir_summary = MODEL_DIR + '/summary/' + datetime.datetime.now(
        ).isoformat()
        train_writer = tf.summary.FileWriter(dir_summary, sess.graph)
        merged_summary = tf.summary.merge_all()

        def train_step(x_batch, y_batch):
            """
            A single training step.
            """
            feed_dict = {model.input_x: x_batch, model.input_y: y_batch}

            summary, _, step, loss = sess.run(
                [merged_summary, train_op, global_step, model.loss], feed_dict)

            train_writer.add_summary(summary, step)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {}".format(time_str, step, loss))

        # Generate batches.
        batch_generator = du.batch_iter(DATA_DIRECTORY, BATCH_SIZE, 200000)

        # Training loops.
        while True:
            x_text, y_text = zip(*next(batch_generator))

            x_batch = [" ".join(four_words) for four_words in x_text]
            x_batch = vocab_processor_x.transform(
                x_batch
            )  # list of token sequence = [[1,2,3,4], [5,6,7,8], [7,8,9,10]]
            y_batch = vocab_processor.transform(
                y_text
            )  # list of tokens sequences = [[1,3 2 5 6], [7,8,9,10,12,15,16]]

            x_batch = np.array([x for x in x_batch])
            y_batch = np.array([y for y in y_batch])

            # Pad sentences of variable lengths.
            y_batch = np.concatenate(
                (y_batch, np.zeros(
                    (len(y_batch), LENGTH_MAX - len(y_batch[1])))), 1)

            train_step(x_batch, y_batch)
            current_step = tf.train.global_step(sess, global_step)
            if current_step % SAVE_EVERY == 0:
                path = saver.save(sess,
                                  checkpoint_path,
                                  global_step=current_step)
                print("Saved model checkpoint to {}\n".format(path))
Example 8
def using(four_words_in_a_tuple):

    # Load data.
    print('Loading data...')
    try:  ## TODO: change try-except with is_file..
        with gfile.Open(MODEL_DIR + '/data', 'rb') as f:
            x_data, y_data = pickle.loads(f.read())
        print('  Old data found in {}.'.format(MODEL_DIR + '/data'))
    except:
        print("I cannot continue: no data has been found in {}.".format(
            MODEL_DIR + '/data'))
        return

    # Load and save vocabulary.
    print('Loading vocabulary...')
    try:
        vocab_processor = learn.preprocessing.VocabularyProcessor.restore(
            MODEL_DIR + '/vocab')
        print("  Old vocabulary found in {}.".format(MODEL_DIR + '/vocab'))
    except:
        print("I cannot continue: no vocabulary has been found in {}.".format(
            MODEL_DIR + '/vocab'))
        return
    vocab_processor_x = learn.preprocessing.VocabularyProcessor(
        4, vocabulary=vocab_processor.vocabulary_)

    with tf.Graph().as_default() as graph:
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)

        # Create model.
        print('Creating model...')
        model = Regression(number_of_words=len(x_data[0]),
                           sequence_length=LENGTH_MAX,
                           vocab_size=len(vocab_processor.vocabulary_),
                           embedding_size=EMBEDDING_SIZE)

        # Checkpoint directory.
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)

    with tf.Session(graph=graph) as sess:

        # Initialize.
        print('Initializing...')
        sess.run(tf.global_variables_initializer())

        # Maybe restore model parameters.
        ckpt = tf.train.get_checkpoint_state(MODEL_DIR)
        if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path + '.index'):
            print("Restoring model parameters from %s." %
                  ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print("I cannot continue: no checkpoint has been found in {}.".
                  format(ckpt.model_checkpoint_path))
            return

        def test_step(x_batch, y_batch):
            """
            A single evaluation step.
            """
            feed_dict = {model.input_x: x_batch, model.input_y: y_batch}

            scores = sess.run([model.scores], feed_dict)
            return scores

        x_text, y_text = zip(
            *[[four_words_in_a_tuple, 'help <<EOS>> help <<EOS>> help']])

        x_batch = [" ".join(four_words) for four_words in x_text]
        x_batch = vocab_processor_x.transform(
            x_batch
        )  # list of token sequence = [[1,2,3,4], [5,6,7,8], [7,8,9,10]]
        y_batch = vocab_processor.transform(
            y_text
        )  # list of tokens sequences = [[1,3 2 5 6], [7,8,9,10,12,15,16]]

        x_batch = np.array([x for x in x_batch])
        y_batch = np.array([y for y in y_batch])

        # Padding
        y_batch = np.concatenate(
            (y_batch, np.zeros(
                (len(y_batch), LENGTH_MAX - len(y_batch[0])))), 1)
        scores = test_step(x_batch, y_batch)

        return scores
Example 9
def testing():
    tf.reset_default_graph()
    with tf.Session() as sess:
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess)

        # Define x_data and y_data used to build the model.
        x_data = [['i'] * 4] * 4
        y_data = [
            'man eat dog <<EOS>> help <<EOS>> pie',
            'man eat dog <<EOS>> fit <<EOS>> pile',
            'man eat dog <<EOS>> form <<EOS>> lip',
            'man eat dog god <<EOS>> bye <<EOS>> plot'
        ]

        # Creation of the vocabulary
        max_document_length = max([len(x.split(" ")) for x in y_data])
        vocab_processor = learn.preprocessing.VocabularyProcessor(
            max_document_length)
        vocab_processor.fit(y_data)
        vocab_processor_x = learn.preprocessing.VocabularyProcessor(
            4, vocabulary=vocab_processor.vocabulary_)
        print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
        #print(vocab_processor.vocabulary_._mapping) # print all vocabulary

        # Create the model.
        print('Creating model...')
        model = Regression(number_of_words=len(x_data[0]),
                           sequence_length=LENGTH_MAX,
                           vocab_size=len(vocab_processor.vocabulary_),
                           embedding_size=3)

        # Define Training procedure.
        print('training procedure')
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(0.001)
        grads_and_vars = optimizer.compute_gradients(model.loss)
        train_op = optimizer.apply_gradients(grads_and_vars,
                                             global_step=global_step)

        # Initialize.
        print('Initialize...')
        sess.run(tf.global_variables_initializer())
        print('End of initialization.')

        def train_step(x_batch, y_batch):
            """
            A single training step.
            """
            feed_dict = {
                model.input_x: x_batch,
                model.input_y: y_batch,
            }
            _, step, loss = sess.run([train_op, global_step, model.loss],
                                     feed_dict)
            time_str = datetime.datetime.now().isoformat()
            print("{}: step {}, loss {}".format(time_str, step, loss))

        # Training loops
        while True:
            x_text = (('man', 'dog', 'eat', 'pie'), ('man', 'dog', 'eat',
                                                     'pile'),
                      ('man', 'dog', 'eat', 'lip'), ('man', 'dog', 'eat',
                                                     'plot'))

            y_text = ('man eat dog <<EOS>> help <<EOS>> pie',
                      'man eat dog <<EOS>> fit <<EOS>> pile',
                      'man eat dog <<EOS>> form <<EOS>> lip',
                      'man eat dog god <<EOS>> bye <<EOS>> plot')

            x_batch = [" ".join(four_words) for four_words in x_text]
            x_batch = vocab_processor_x.transform(
                x_batch
            )  # list of token sequence = [[1,2,3,4], [5,6,7,8], [7,8,9,10]]
            y_batch = vocab_processor.transform(
                y_text
            )  # list of tokens sequences = [[1,3 2 5 6], [7,8,9,10,12,15,16]]

            x_batch = np.array([x for x in x_batch])
            y_batch = np.array([y for y in y_batch])

            # Padding
            y_batch = np.concatenate(
                (y_batch, np.zeros(
                    (len(y_batch), LENGTH_MAX - len(y_batch[1])))), 1)

            train_step(x_batch, y_batch)
Example 10
                        metavar='OUT',
                        help='output dim of model (default: 1)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')

    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device {}.".format(device))

    train_loader = _get_train_data_loader(args.batch_size, args.data_dir)

    model = Regression(args.input_features, args.hidden_dim1, args.hidden_dim2,
                       args.output_dim).to(device)

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    criterion = RMSELoss

    train(model, train_loader, args.epochs, criterion, optimizer, device)

    model_info_path = os.path.join(args.model_dir, 'model_info.pth')
    with open(model_info_path, 'wb') as f:
        model_info = {
            'input_features': args.input_features,
            'hidden_dim1': args.hidden_dim1,
            'hidden_dim2': args.hidden_dim2,
            'output_dim': args.output_dim,
        }
        torch.save(model_info, f)
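The criterion = RMSELoss line above references a helper that is not shown in this snippet; a minimal sketch of such a function (an assumption, not the script's actual definition) might look like:

import torch
import torch.nn.functional as F

def RMSELoss(pred, target, eps=1e-8):
    # Root-mean-square error; eps keeps the square root differentiable at zero error.
    return torch.sqrt(F.mse_loss(pred, target) + eps)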