Example #1
0
def findAcc(path):
    """Estimate the fraction of an image covered by green (forest) pixels.

    Reads the original image for *path*, isolates the green HSV band,
    extracts edges, annotates the image with the coverage percentage and
    today's date, writes the annotated copy, and returns the coverage ratio.
    """
    image = cv.imread(getFilePath(ORIGINAL, path))

    # Keep only pixels inside the green HSV band.
    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    mask = cv.inRange(hsv,
                      np.array([0, 80, 0]),
                      np.array([150, 255, 100]))
    masked = cv.bitwise_and(image, image, mask=mask)

    # Grayscale + light blur before thresholding/edge detection.
    gray = cv.GaussianBlur(cv.cvtColor(masked, cv.COLOR_BGR2GRAY), (3, 3), 0)

    # Otsu picks the threshold; reuse it to scale the Canny hysteresis bounds.
    otsu_level, _ = cv.threshold(gray, 0, 255,
                                 cv.THRESH_BINARY + cv.THRESH_OTSU)
    edges = cv.Canny(gray, otsu_level * 0.5, otsu_level)
    edges = cv.dilate(edges, np.ones((5, 5), np.uint8), iterations=1)

    covered = cv.countNonZero(edges)
    total = image.shape[0] * image.shape[1]
    ratio = covered / total

    # Stamp coverage percentage and today's date onto the image.
    today = datetime.datetime.now()
    cv.putText(image, 'Forest Cover ' + str(ratio * 100), (56, 80),
               cv.FONT_HERSHEY_COMPLEX, 0.7, (2, 255, 2), 2)
    cv.putText(image,
               str(today.day) + '/' + str(today.month) + '/' + str(today.year),
               (600, 500), cv.FONT_HERSHEY_COMPLEX, 0.7, (2, 255, 2), 2)

    cv.imwrite(getFilePath(MODIFIED, path), image)
    return ratio
Example #2
0
File: app.py  Project: arvindpunk/Forestics
def home():
    """Render the gallery page listing every processed image.

    Scans the base directory and builds one entry per image with its
    original path, modified path, and stored title, then renders the
    index template with that list.
    """
    image_paths = []
    for imgdir in os.scandir(baseDirectory):
        # Read the stored title via a context manager so the file handle is
        # closed deterministically (the old open(...).read() leaked it).
        with open(getFilePath(DATA, imgdir), 'r') as title_file:
            title = title_file.read()
        image_paths.append({
            'original': getFilePath(ORIGINAL, imgdir),
            'modified': getFilePath(MODIFIED, imgdir),
            'title': title,
        })
    # TODO: sort image_paths by last-modified time.
    return render_template('index.html', imagePaths=image_paths)
Example #3
0
    def on_epoch_end(self, epoch, logs=None):
        """Record this epoch's metrics and save an updated training plot.

        Args:
            epoch: zero-based epoch index supplied by Keras.
            logs: metric dict from Keras. Default changed from the mutable
                `{}` to `None` (classic mutable-default pitfall); behaviour
                is identical because the dict is only read, never mutated.
        """
        logs = logs or {}

        # Append the logs, losses and accuracies to the lists
        self.logs.append(logs)
        self.losses.append(logs.get('loss'))
        self.acc.append(logs.get('acc'))
        self.val_losses.append(logs.get('val_loss'))
        self.val_acc.append(logs.get('val_acc'))

        # Before plotting ensure at least 2 epochs have passed
        if len(self.losses) > 1:
            N = np.arange(0, len(self.losses))
            plt.figure()
            plt.plot(N, self.losses, label="train_loss")
            plt.plot(N, self.acc, label="train_acc")
            plt.plot(N, self.val_losses, label="val_loss")
            plt.plot(N, self.val_acc, label="val_acc")
            plt.title("Training Loss and Accuracy [Epoch {}]".format(epoch))
            plt.xlabel("Epoch #")
            plt.ylabel("Loss/Accuracy")
            plt.legend()

            # Persist one figure per epoch, then close it to free memory.
            plt.savefig(
                utils.getFilePath(TRAINING_IMG, 'Epoch-{}.png'.format(epoch)))
            plt.close()
Example #4
0
 def startPipeline(self, pickled_filename):
     """Run the full data pipeline: preprocess, embed, then merge.

     The embedded dataset is written under pickled/word2vec as
     *pickled_filename* (the folder is created on demand).
     """
     self.__preprocess()
     target_path = ut.getFilePath(['pickled', 'word2vec'],
                                  pickled_filename,
                                  create=True)
     self.__embbed_dataset(target_path)
     self.__merge()
Example #5
0
def train_model(getSantizeStanceData):
    """Train the baseline stance model and save it to the pickled folder.

    Args:
        getSantizeStanceData: object exposing getTrainTestData() that
            returns (x_train, y_train, x_test, y_test).
    """
    x_train, y_train, x_test, y_test = getSantizeStanceData.getTrainTestData()

    model = baseline_model(x_train)()

    print("[==] Model Summary")
    # Keras' summary() prints to stdout itself and returns None, so wrapping
    # it in print() used to emit a spurious "None" line.
    model.summary()

    print("[+] Training model")

    # fit() returns a History object; it was previously bound to an unused
    # local, so the binding is dropped.
    model.fit(x_train,
              y_train,
              validation_data=(x_test, y_test),
              batch_size=batch_size,
              callbacks=[plot_losses],
              epochs=nb_epoch,
              verbose=2)

    print("[==] Saving model")

    model.save(utils.getFilePath('pickled', visual.MODEL_NAME))

    print("--- %s seconds ---" % (time.time() - start_time))
Example #6
0
    print("[+] Training model")

    estimator = model.fit(x_train,
                          y_train,
                          validation_data=(x_test, y_test),
                          batch_size=batch_size,
                          callbacks=[plot_losses],
                          epochs=nb_epoch,
                          verbose=2)

    print("[==] Saving model")

    model.save(utils.getFilePath('pickled', visual.MODEL_NAME))

    print("--- %s seconds ---" % (time.time() - start_time))


if __name__ == "__main__":
    # Generate all the required dataset files. The embedding type selects
    # the vector formation: tokenizer or tf_idf.
    pipeline = pre.StanceDataPipeline(embedding_type='tokenizer')
    # Name of the pickle file holding the embedded dataset.
    pipeline.startPipeline(pickled_filename="word2vec_filename.pkl")

    # Load the pickled vectors back and build the train/test split.
    vec_path = utils.getFilePath(['pickled', 'word2vec'],
                                 filename='word2vec_filename.pkl')
    stance_data = pre.GetStanceData(path_to_vec_pkl=vec_path,
                                    embedding_type='tokenizer')
    stance_data.getTrainTestData(shuffle=True)
    train_model(getSantizeStanceData=stance_data)
Example #7
0
import matplotlib
matplotlib.use('agg')

import matplotlib.pyplot as plt
import keras
import numpy as np
import utils

# Identifiers used to build the output folder for training artifacts.
MODEL_NAME = "LSTM_CNN"
FOLDER_NAME = "training"

# Folder that receives the per-epoch training plots; create=True presumably
# makes the directory if it is missing — confirm against utils.getFilePath.
TRAINING_IMG = utils.getFilePath(foldername=[FOLDER_NAME, MODEL_NAME],
                                 create=True)


class TrainingPlot(keras.callbacks.Callback):

    # This function is called when the training begins
    def on_train_begin(self, logs=None):
        """Reset the metric-history buffers at the start of training.

        Args:
            logs: unused; accepted only to match the Keras callback
                signature. Default changed from the mutable `{}` to `None`
                (mutable-default pitfall); the argument is never read, so
                behaviour is unchanged.
        """
        # Initialize the lists for holding the logs, losses and accuracies
        self.losses = []
        self.acc = []
        self.val_losses = []
        self.val_acc = []
        self.logs = []

    # This function is called at the end of each epoch
    def on_epoch_end(self, epoch, logs={}):

        # Append the logs, losses and accuracies to the lists
        self.logs.append(logs)
Example #8
0
from mwapi import MWapi
from sc import SC
from utils import getFilePath
# NOTE(review): module-import side effect — calls getFilePath on this file's
# own path. Its purpose is not visible from here (possibly initialises a
# base directory or cache inside utils); confirm against utils.getFilePath.
getFilePath(__file__)


class VOC:
    """Word lookup that consults a spell checker before a dictionary API."""

    def __init__(self, temp_dir, temp_url):
        # SC and MWapi come from the sc / mwapi modules; their exact
        # semantics are not visible from this file.
        self.sc = SC()
        self.mwapi = MWapi(temp_dir, temp_url)

    def lookup(self, word):
        """Look up *word*.

        Returns {"sc": suggestions} when the spell checker rejects the word;
        otherwise a dict whose "voc" key holds the API's JSON response.
        """
        res = {"sc": None, "api": None}
        iscorrect, res_sc = self.sc.check(word)
        if not iscorrect:
            # res["candi"] = res_sc
            # Misspelled: return only the spell-checker suggestions.
            res = {"sc": res_sc}
            return res
        res_api = self.mwapi.lookup(word, mode="json")
        # NOTE(review): the result is stored under "voc" although the dict
        # was initialised with an "api" key (which therefore stays None) —
        # looks like a leftover; confirm which key callers actually read.
        res["voc"] = res_api
        return res
Example #9
0
import matplotlib
matplotlib.use('agg')

import matplotlib.pyplot as plt
import keras
import numpy as np
import utils

MODEL_NAME = "LSTM_CNN"
FOLDER_NAME = "training"

# Resolved an unresolved git merge conflict that was left in the file
# (<<<<<<< / ======= / >>>>>>> markers are a syntax error in Python).
# Kept the HEAD variant: it matches this module's style elsewhere and the
# snippet imports `utils` but not `os`, so the alternative branch
# (os.path.join-based, plus a conflicting MODEL_NAME = "default") would
# not even run here.
TRAINING_IMG = utils.getFilePath(
    foldername=[FOLDER_NAME, MODEL_NAME],
    create=True
)

class TrainingPlot(keras.callbacks.Callback):

    # This function is called when the training begins
    def on_train_begin(self, logs={}):
        # Initialize the lists for holding the logs, losses and accuracies
        self.losses = []
        self.acc = []
        self.val_losses = []
Example #10
0
import utils

# Column/field keys shared by the dataset dictionaries below.
BODY_KEY = "body"
STANCE_KEY = "stance"
HEADLINE_KEY = 'headline'

# Root folder for the raw CSV dataset (created on demand).
DATASET_FOLDER = utils.getFilePath('dataset', create=True)
PICKLED_FOLDER = 'pickled'

# Raw training CSVs; both must exist before training can start.
TRAIN_DATASET_FILES = {
    BODY_KEY: utils.getFilePath('dataset', 'train_bodies.csv'),
    STANCE_KEY: utils.getFilePath('dataset', 'train_stances.csv'),
}
utils.checkRequiredFiles(TRAIN_DATASET_FILES)

# Pickled numpy versions of the training columns.
TRAIN_DATASET_NUMPY = {
    STANCE_KEY: utils.getFilePath(PICKLED_FOLDER, 'train_stance_scipy.npy'),
    BODY_KEY: utils.getFilePath(PICKLED_FOLDER, 'train_body_scipy.npy'),
    HEADLINE_KEY: utils.getFilePath(PICKLED_FOLDER, 'train_headline_scipy.npy'),
}

# Test-set CSVs. NOTE(review): BODY_KEY maps to 'test_stances.csv', which
# looks like a stances file rather than a bodies file — confirm the mapping.
TEST_DATASET_FILES = {
    BODY_KEY: utils.getFilePath('dataset', 'test_stances.csv'),
    STANCE_KEY: utils.getFilePath('dataset', 'competition_test_stances_label.csv'),
}

utils.checkRequiredFiles(TEST_DATASET_FILES)

TEST_DATASET_NUMPY = {
    STANCE_KEY:utils.getFilePath(PICKLED_FOLDER,'test_stance_scipy.npy'),