Example #1
    def learn_face(self, nb_image, user):
        d = Detection(self.proto, self.model)
        face_images = d.get_face(nb_image, 0.8)
        # directory where the images used to train the recognizer are stored
        path_to_images = "temp/dataset/%s" % user
        if not os.path.exists(path_to_images):
            os.makedirs(path_to_images)
        for i, image in enumerate(face_images):
            image_name = path_to_images + "/" + str(i) + ".jpg"
            # Optionally convert to grayscale before saving:
            # image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            cv2.imwrite(image_name, image)
        ExtractEmbeddings.main(self.dataset, self.embeddings, self.proto,
                               self.model, self.embedding_model)
        TrainModel.main(self.embeddings, self.recognizer, self.le)

        path_to_user_images = self.dataset + "/" + user
        shutil.rmtree(path_to_user_images)
        # zip output so it can be sent easily
        zipname = "%s_frdata.zip" % user
        output_zip = zipfile.ZipFile(zipname, 'w')
        # for folder, subfolders, files in os.walk("output"):
        #     for file in files:
        #         output_zip.write(os.path.join(folder, file), os.path.relpath(os.path.join(folder, file), "output"),
        #                          compress_type=zipfile.ZIP_DEFLATED)
        output_zip.write("output/le.pickle", "le.pickle")
        output_zip.write("output/recognizer.pickle", "recognizer.pickle")

        output_zip.close()
        return zipname
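A minimal sketch of consuming the archive this method returns on the client side (the user name 'alice' is hypothetical; the zip contains only the two pickles written above):

import zipfile

with zipfile.ZipFile('alice_frdata.zip') as zf:
    zf.extractall('client_output')  # extracts le.pickle and recognizer.pickle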
Example #3
def main(params):

    train = params.train
    evaluate = params.evaluate

    dat = pd.read_csv('./data/iris_data.csv')

    X = dat[[x for x in dat.columns if x != 'class']]
    features = X.columns

    y = dat['class']

    if evaluate == 1:
        search = EvaluateModel(X, y)
        trials = search.run_trials(100)
        with open('./models/trials.pickle', 'wb') as handle:
            pickle.dump(trials, handle)

    if train == 1:
        clf = KNeighborsClassifier(n_neighbors=11)
        tm = TrainModel(X, y, clf, nfolds=10)
        production_model = tm.train_model()
        with open('./models/model.pickle', 'wb') as handle:
            pickle.dump(production_model, handle)
            pickle.dump(features, handle)
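Because the script above issues two sequential pickle.dump calls on the same handle, loading must mirror that order; a short sketch:

import pickle

with open('./models/model.pickle', 'rb') as handle:
    production_model = pickle.load(handle)  # first object dumped
    features = pickle.load(handle)          # second object dumped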
Example #4
class TestTrainModel(TestCase):
    def setUp(self):
        self.config = Config()
        self.test_train_model = TrainModel(self.config)
        self.dummy_data_df = pd.DataFrame(
            np.array([['val11', 'val12', 'val13'], ['val21', 'val22', 'val23'],
                      ['val31', 'val32', 'val33']]),
            columns=self.config.ALL_PRODUCT_ATTRS)

    @mock.patch.object(Recommender, 'fit')
    def test_train(self, mocked_fit):

        # mock calls.
        self.test_train_model.read_data = MagicMock(
            return_value=self.dummy_data_df)
        self.test_train_model.write_data = MagicMock(return_value=None)
        self.test_train_model.transform_data = MagicMock(
            return_value=([1, 2], [3, 4], [5, 6], [7, 8]))
        mocked_fit.return_value = (self.dummy_data_df, self.dummy_data_df)

        # call the method to be tested.
        self.test_train_model.train()

        # assertions to make sure all methods are called with expected arguments.
        self.test_train_model.read_data.assert_has_calls([
            call('dummy_path1'),
            call('dummy_path2'),
            call('dummy_path3'),
            call('dummy_path4')
        ])

        self.test_train_model.transform_data.assert_has_calls([
            call(self.dummy_data_df, self.config.ALL_PRODUCT_ATTRS, [
                'SID_IDX', 'CONFIG_ID', 'PRODUCT_CATEGORY', 'PRODUCT_TYPE',
                'BRAND'
            ], 'SID_IDX'),
            call(self.dummy_data_df, self.config.ALL_PRODUCT_ATTRS, [
                'CUSTOMER_IDX', 'CONFIG_ID', 'PRODUCT_CATEGORY',
                'PRODUCT_TYPE', 'BRAND'
            ], 'CUSTOMER_IDX')
        ])

        mocked_fit.assert_called_once_with(
            [1, 2], [1, 2], [3, 4], [3, 4], [5, 6], [5, 6],
            [7, 8], [7, 8], self.dummy_data_df, self.dummy_data_df)

        self.test_train_model.write_data.assert_called_once_with(
            self.dummy_data_df)
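For reference, the mocking pattern this test relies on, shown in isolation (standard unittest.mock, nothing project-specific):

from unittest.mock import MagicMock, call

reader = MagicMock(return_value='df')
reader('dummy_path1')
reader('dummy_path2')
reader.assert_has_calls([call('dummy_path1'), call('dummy_path2')])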
Example #5
 def __init__(self, config_files, output_folder):
     self.name = strings.CONFIG_MANAGER_NAME
     self.output_dir = create_timestamped_dir(output_folder)
     self._get_config(config_files)
     # Init logger
     file_log_level = str_to_loglevel[self.cfg.get('logging', 'file_log_level').lower()]
     console_log_level = str_to_loglevel[self.cfg.get('logging', 'console_log_level').lower()]
     configure_logging(file_log_path=os.path.join(self.output_dir, 'log.txt'),
                       console_log_level=console_log_level,
                       file_log_level=file_log_level)
     self.logger = logging.getLogger()
     # Copy config files
     config_files_output_dir = os.path.join(self.output_dir, self.name)
     copy_files(output_dir=config_files_output_dir,
                orig_files=config_files,
                logger=self.logger)
     self.logger.debug('Config files are saved to {}'.format(config_files_output_dir))
     # Getting config models
     self.language_config = config_models.LanguageConfig(cfg=self.cfg)
     self.cont_config = config_models.ContConfig(cfg=self.cfg)
     self.data_wrapper_config = config_models.DataWrapperConfig(cfg=self.cfg)
     self.embedding_config = config_models.EmbeddingConfig(cfg=self.cfg)
     self.training_config = config_models.TrainingConfig(cfg=self.cfg)
     self.validation_config = config_models.ValidationConfig(cfg=self.cfg)
     self.test_config = config_models.TestConfig(cfg=self.cfg)
     # Getting models
     self.cont_model = ContModel(cont_config=self.cont_config)
     self.data_model_wrapper = DataModelWrapper(data_wrapper_config=self.data_wrapper_config,
                                                embedding_config=self.embedding_config,
                                                language_config=self.language_config)
     if strings.VALID in self.data_model_wrapper.data_models:
         self.validation_model = ValidModel(valid_config=self.validation_config,
                                            language_config=self.language_config,
                                            output_dir=self.output_dir)
     else:
         self.validation_model = None
     self.plot_model = PlotModel(input_dir=self.output_dir)
     self.training_model = TrainModel(train_config=self.training_config,
                                      data_model_wrapper=self.data_model_wrapper,
                                      language_config=self.language_config,
                                      output_dir=self.output_dir,
                                      validation_model=self.validation_model,
                                      plot_model=self.plot_model,
                                      cont_model=self.cont_model)
     if strings.TEST in self.data_model_wrapper.data_models:
         self.test_model = TestModel(test_config=self.test_config,
                                     language_config=self.language_config,
                                     output_dir=self.output_dir)
         self.test_model.set_datamodel(self.data_model_wrapper.data_models[strings.TEST])
     else:
         self.test_model = None
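The cfg.get('logging', ...) lookups above are consistent with the stdlib configparser API; a minimal sketch of a compatible [logging] section (an assumed layout, not this project's actual config):

from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("""
[logging]
file_log_level = debug
console_log_level = info
""")
print(cfg.get('logging', 'file_log_level'))  # -> 'debug'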
Example #6
    def test_model(self, model='model.npz'):
        '''TEST_MODEL

        Segment a new brain slice with the trained model,
        obtaining predicted labels and computing similarity coefficients.

        '''

        if self.data.shape[0] == 0:
            print("Test slice - This slice has no CSF, GM and WM.")
            return

        # Computation graph (TensorFlow 1.x-style API)
        data_num = self.data.shape[0]
        x = tf.placeholder(tf.float32, shape=[data_num, self.fn])

        # Obtain the result from the model
        net = TrainModel().build_network(x)
        y_out = net.outputs

        # Obtain the predicted labels
        pred = tf.reshape(tf.argmax(y_out, 1) + 1, shape=[data_num])

        # Assign model's weights with the saved parameters
        sess = tf.Session()
        params = tl.files.load_npz(name=model)
        tl.files.assign_params(sess, params, net)

        y_pred = sess.run(pred, feed_dict={x: self.data[:, 1:-1]})
        y_pred = y_pred.reshape((-1, 1))
        y_true = self.data[:, -1].reshape((-1, 1)).astype(int)

        # Calculate similarity coefficients
        self.similar_coefficient(y_true, y_pred, 'di')
        self.similar_coefficient(y_true, y_pred, 'ji')

        sess.close()

        self.pred = y_pred
        self.true = y_true

        return
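The 'di' and 'ji' coefficients computed by similar_coefficient above are presumably the Dice and Jaccard indices; a minimal NumPy sketch of those metrics for binary masks (an illustration, not the author's implementation):

import numpy as np

def dice(a, b):
    # Dice index of two binary masks: 2|A ∩ B| / (|A| + |B|)
    a, b = a.astype(bool), b.astype(bool)
    return 2 * np.logical_and(a, b).sum() / (a.sum() + b.sum())

def jaccard(a, b):
    # Jaccard index of two binary masks: |A ∩ B| / |A ∪ B|
    a, b = a.astype(bool), b.astype(bool)
    return np.logical_and(a, b).sum() / np.logical_or(a, b).sum()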
Example #7
scaler = MinMaxScaler()
numerical = ['home_skills_total', 'away_skills_total', 'home_height_total', 'away_height_total', 'away_shoton',
            'home_shoton', 'away_shotoff', 'home_shotoff', 'away_fouls', 'home_fouls',
            'away_cards', 'home_cards', 'away_crosses', 'home_crosses', 'away_corners',
            'home_corners', 'away_possession', 'home_possession']

features_final = pd.DataFrame(data=processed_data)
features_final[numerical] = scaler.fit_transform(processed_data[numerical])

# Split into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features_final, labels_final, test_size=0.2, random_state=0) 

# Naive Bayes
from sklearn.naive_bayes import GaussianNB
naive_bayes_learner = GaussianNB()
naive_bayes_model = TrainModel(X_train.shape[0], naive_bayes_learner, X_train, y_train, X_test, y_test)
naive_bayes_model.train_predict()
print(naive_bayes_model.get_results())

# KNN
from sklearn.neighbors import KNeighborsClassifier
knn_learner = KNeighborsClassifier()
k = [3, 4, 5]
knn_model = KNNModel(X_train.shape[0], knn_learner, X_train, y_train, X_test, y_test, k)
knn_model.grid_search()
print(knn_model.get_results())

# SVM
# Stochastic gradient descent classifier
# Random forest classifier
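Following the pattern of the Naive Bayes block above, the stochastic gradient descent stub could look like this (a sketch assuming the same TrainModel interface):

from sklearn.linear_model import SGDClassifier
sgd_learner = SGDClassifier(random_state=0)
sgd_model = TrainModel(X_train.shape[0], sgd_learner, X_train, y_train, X_test, y_test)
sgd_model.train_predict()
print(sgd_model.get_results())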
Example #8
# NOTE: the opening of this example was truncated; the imports, function
# header, and input/embedding layers below are reconstructed from the calls
# that follow (the tokenizer/MIDI setup and constants remain omitted).
import pickle

import tensorflow as tf


def create_model(seq_len, unique_notes, dropout=0.3, output_emb=100,
                 rnn_unit=128, dense_unit=64):
    inputs = tf.keras.layers.Input(shape=(seq_len,))
    embedding = tf.keras.layers.Embedding(input_dim=unique_notes + 1,
                                          output_dim=output_emb,
                                          input_length=seq_len)(inputs)
    forward_pass = tf.keras.layers.LSTM(rnn_unit,
                                        return_sequences=True)(embedding)
    forward_pass = tf.keras.layers.Dropout(dropout)(forward_pass)
    forward_pass = tf.keras.layers.LSTM(rnn_unit)(forward_pass)
    forward_pass = tf.keras.layers.Dense(dense_unit)(forward_pass)
    forward_pass = tf.keras.layers.Dropout(dropout)(forward_pass)
    outputs = tf.keras.layers.Dense(unique_notes + 1,
                                    activation="softmax")(forward_pass)

    model = tf.keras.Model(inputs=inputs,
                           outputs=outputs,
                           name='generate_scores_rnn')
    return model


model = create_model(seq_len, unique_notes)
model.summary()

optimizer = tf.keras.optimizers.RMSprop()
loss_fn = tf.keras.losses.categorical_crossentropy

train_class = TrainModel(EPOCHS, note_tokenizer, sampled_200_midi,
                         FRAME_PER_SECOND, BATCH_NNET_SIZE, BATCH_SONG,
                         optimizer, loss_fn, TOTAL_SONGS, model, seq_len)

train_class.train()
model.save('model_ep4.h5')
pickle.dump(note_tokenizer, open("tokenizer.p", "wb"))
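A short sketch of loading these artifacts back later for generation (mirrors the two save calls above):

model = tf.keras.models.load_model('model_ep4.h5')
with open("tokenizer.p", "rb") as f:
    note_tokenizer = pickle.load(f)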
Example #9
#!/usr/bin/env python3

import numpy as np
import re
from train_model import TrainModel
from utils import load_images

images, filenames = load_images('HBTNaligned', as_array=True)
identities = [re.sub('[0-9]', '', f[:-4]) for f in filenames]
print(set(identities))
thresholds = np.linspace(0.01, 0.1, 100)
tm = TrainModel('models/trained_fv.h5', 0.2)
tau, f1, acc = tm.best_tau(images, identities, thresholds)
print(tau)
print(f1)
print(acc)
Example #10
 def run(self):
     TrainModel(self.config).train()
Example #11
#!/usr/bin/env python3

import os
import numpy as np
import tensorflow as tf
from train_model import TrainModel
from utils import load_images, load_csv, generate_triplets

images, filenames = load_images('HBTNaligned', as_array=True)
triplet_names = load_csv('FVTriplets.csv')
A, P, N = generate_triplets(images, filenames, triplet_names)
triplets = [A[:-2], P[:-2], N[:-2]]

tm = TrainModel('models/face_verification.h5', 0.2)
tm.train(triplets, epochs=1)
base_model = tm.save('models/trained_fv.h5')
print(base_model is tm.base_model)
print(os.listdir('models'))
Example #12
parser = argparse.ArgumentParser()
parser.add_argument('-m',
                    '--mode',
                    required=True,
                    help='Run training mode or testing mode')
args = parser.parse_args()
mode = args.mode

if mode == 'train':
    audio_path = input('Please enter the audio files path: ')
    csv_file = input('Please enter the csv_file path: ')
    destination_folder = input(
        'Please enter the folder where the spectrograms will be saved: ')
    train_data = input(
        'Please enter the name of the npy file (train_data.npy): ')
    prepare_data(audio_path=audio_path,
                 destination_folder=destination_folder,
                 csv_file=csv_file,
                 npy_file=train_data)
    train = TrainModel(data_path=train_data)
    train.run_training()
elif mode == 'test':
    test_audio = input('Please enter the audio file path: ')
    weights_path = input('Please enter the weights path: ')
    json_file = input('Please enter the json file path: ')
    test = TestModel(audio_file=test_audio,
                     weights_path=weights_path,
                     json_path=json_file)
else:
    print("Please choose either 'train' or 'test' mode")
Example #13
#!/usr/bin/env python3

import numpy as np
import tensorflow as tf
from train_model import TrainModel
from utils import load_images, load_csv, generate_triplets

images, filenames = load_images('HBTNaligned', as_array=True)
images = images.astype('float32') / 255

triplet_names = load_csv('FVTriplets.csv')
A, P, N = generate_triplets(images, filenames, triplet_names)
triplets = [A[:-2], P[:-2], N[:-2]]  # to make all batches divisible by 32

tm = TrainModel('models/face_verification.h5', 0.2)
history = tm.train(triplets, epochs=1)
print(history.history)
Example #14
import numpy as np
import onnx
import torch
import torch.nn as nn

# CNN and TrainModel are assumed to come from this project's local modules
from data_provider import DataProvider
from ck_data_provider import CK_DataProvider
from fer2013_data_provider import FER2013_DataProvider

np.set_printoptions(threshold=np.inf)

LEARNING_RATE = 0.01
BATCH_SIZE = 50
EPOCH = 30
# WEIGHT_DECAY = 0.05

# data_provider = CK_DataProvider(BATCH_SIZE, 0)
data_provider = FER2013_DataProvider(BATCH_SIZE, 0)

cnn = CNN()
cnn_model = TrainModel(data_provider, LEARNING_RATE, EPOCH)
cnn_model.training(cnn, torch.optim.Adam(cnn.parameters(), lr=LEARNING_RATE),
                   nn.CrossEntropyLoss())

test_output = cnn(data_provider.x_validation)
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()

dummy_input = torch.tensor(np.expand_dims([np.random.rand(48, 48)], 1),
                           dtype=torch.float32)
torch.onnx.export(cnn, dummy_input, 'cnn_model.onnx', verbose=True)

onnx_model = onnx.load('cnn_model.onnx')
onnx.checker.check_model(onnx_model)

print('Export cnn_model.onnx complete!')
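As a sanity check, the exported graph can be run with onnxruntime (an extra dependency, not used above) and compared against the PyTorch output:

import onnxruntime as ort

sess = ort.InferenceSession('cnn_model.onnx')
input_name = sess.get_inputs()[0].name
onnx_out = sess.run(None, {input_name: dummy_input.numpy()})[0]
print(np.allclose(onnx_out, cnn(dummy_input).detach().numpy(), atol=1e-5))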
Example #16
class ObjectDetection:
    
    def __init__(self, working_dir, import_image_path, 
                 download_image, aoi_path, username, password, date_range,
                 mask_path, out_dir, step, prediction_threshold,
                 pos_chip_dir, neg_chip_dir, chip_width, chip_height, 
                 augment_pos_chips, augment_neg_chips, save_model, import_model, segmentation):
        """
        
        Parameters
        ----------
        working_dir : Directory
            DESCRIPTION: Filepath to working directory
        import_image_path : Directory
            DESCRIPTION: Filepath to image to be used in object detection
        download_image : True or False
            DESCRIPTION: Option to download image from Sentinel API
        aoi_path : Directory
            DESCRIPTION: Filepath to AOI Geo File
        username : String
            DESCRIPTION: Personal username for Sentinel Copernicus Hub
        password : String
            DESCRIPTION: Personal password for Sentinel Copernicus Hub
        date_range : Tuple
            DESCRIPTION: Desired time window for image query ('20190101', '20200101')
        mask_path : Directory
            DESCRIPTION: Filepath to optional mask layer used to decrease the consideration area for object detection
        out_dir : Directory
            DESCRIPTION: Filepath to desired output location of results
        step : Integer
            DESCRIPTION: Value that defines the number of pixels moving window predictions jumps by
        prediction_threshold : Float
            DESCRIPTION: Value between 0-1 that defines the consideration minimum for positive prediction
        pos_chip_dir : Directory
            DESCRIPTION: Filepath to positive image chips (contain the target object for detection)
        neg_chip_dir : Directory
            DESCRIPTION: Filepath to negative image chips (do not contain the target object)
        chip_width : Integer
            DESCRIPTION: Desired width of each output training chip
        chip_height : Integer
            DESCRIPTION: Desired height of each output training chip
        augment_pos_chips : True or False
            DESCRIPTION: Option to create additional training data for positive chips through augmentation
        augment_neg_chips : True or False
            DESCRIPTION: Option to create additional training data for negative chips through augmentation.
        save_model : True or False
            DESCRIPTION: Option to save the model to current directory
        import_model : Directory
            DESCRIPTION: Filepath to saved model
        segmentation : True or False
            DESCRIPTION: Option to perform K-Means image segmentation as preprocess
        
        Notes
        -------
        The purpose of this module is to combine training data and a trained model to conduct object detection in an image.
        A built-in function runs predictions across the image using a "moving window"; windows whose
        prediction exceeds the user-defined prediction_threshold are converted into coordinates and stored in a list.
        Next, the positive prediction coordinates are filtered to produce only one prediction per potential object,
        by analyzing overlapping bounding boxes and keeping the highest-scoring prediction.
        The final results are then referenced back to the geographic attribution of the image and converted into
        well-known text (WKT) strings that can be imported as geographic data into GIS software.
        
        """
        self.working_dir = working_dir
        self.import_image_path = import_image_path 
        self.download_image = download_image
        self.aoi_path = aoi_path
        self.username = username
        self.password = password
        self.date_range = date_range
        self.mask_path = mask_path
        self.out_dir = out_dir
        self.step = step
        self.prediction_threshold = prediction_threshold
        self.width = chip_width
        self.height = chip_height
        self.chip_pix = chip_width * chip_height * 3
        self.mask_threshold = self.chip_pix * 0.9
        self.import_model = import_model
        self.train_model = TrainModel(pos_chip_dir, neg_chip_dir, chip_width, chip_height, 
                                      augment_pos_chips, augment_neg_chips, save_model)
        self.segmentation = segmentation
        if self.download_image:
            instance = GetSentinelImagery(
                working_dir, aoi_path, username, password, date_range)
            self.import_image_path, self.date, self.time = instance.download_image()

    
    def create_tensor(self):
        """
        
        Returns
        -------
        im_tensor : Array of uint8
            DESCRIPTION: 3-D array version of input image

        """
        print('\n >>> CREATING TENSOR')
        if self.aoi_path:
            image_path = self.import_image_path
            image = rasterio.open(image_path)
            crs = str(image.crs).split(':')[1] 
            shape = gpd.read_file(self.aoi_path)
            shape = shape.to_crs({'init': 'epsg:{}'.format(crs)})
            print('\n >>> CROPPING IMAGE')
            with fiona.open(self.aoi_path, "r") as shape:
                geoms = [feature["geometry"] for feature in shape]
                clipped_image, out_transform = mask(image, geoms, crop=True)
            im_tensor = clipped_image.transpose(1, 2, 0)
            im_x_res = abs(out_transform[0])
            im_y_res = abs(out_transform[4])
            left = out_transform[2]
            top = out_transform[5]
            
        else:
            image = rasterio.open(self.import_image_path)
            crs = str(image.crs).split(':')[1] 
            im_width = image.width
            im_height = image.height
            left, bottom, right, top = image.bounds
            im_x_res = (right-left)/im_width
            im_y_res = (top-bottom)/im_height  
            im_tensor = image.read()
            im_tensor = im_tensor.transpose(1, 2, 0)   
        
        if self.mask_path:
            with fiona.open(self.mask_path, "r") as shapefile:
                geoms = [feature["geometry"] for feature in shapefile]
                out_image, out_transform = rasterio.mask.mask(image, geoms, invert=True)
                im_tensor = out_image.transpose(1, 2, 0)
        print('\n     Complete')
        return im_tensor, left, top, im_x_res, im_y_res, crs
    
       
    def moving_window(self, x, y, im_tensor):
        """
        
        Parameters
        ----------
        x : Integer
            DESCRIPTION: Current x position in image
        y : Integer
            DESCRIPTION: Current y position in image
        im_tensor : Array of uint8
            DESCRIPTION: 3-D array version of input image

        Returns
        -------
        window : Array of uint8
            DESCRIPTION: Image patch from larger image to run prediction on

        """
        # Preallocate the patch buffer; every cell is overwritten below
        window = np.zeros((3, self.width, self.height))
        for i in range(self.width):
            for j in range(self.height):
                window[0][i][j] = im_tensor[0][y+i][x+j]
                window[1][i][j] = im_tensor[1][y+i][x+j]
                window[2][i][j] = im_tensor[2][y+i][x+j]
        window = window.reshape([-1, 3, self.width, self.height])
        window = window.transpose([0,2,3,1])
        window = window / 255
        return window     
    
    def k_means_segmentation(self, im_tensor):
        tensor = im_tensor.transpose(1, 2, 0)
        img = cv2.cvtColor(tensor, cv2.COLOR_BGR2RGB)
        vectorized = img.reshape((-1, 3))
        vectorized = np.float32(vectorized)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 3
        attempts = 10
        ret, label, center = cv2.kmeans(vectorized, K, None, criteria, attempts, cv2.KMEANS_PP_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        result_image = res.reshape(img.shape)
        
        plt.figure(1, figsize = (15, 30))
        plt.subplot(3, 1, 1)
        plt.imshow(result_image)

        
        unique, counts = np.unique(result_image, return_counts=True)

        size = self.width
        half = size/2
        
        y_shape = result_image.shape[0]
        x_shape = result_image.shape[1]
        test_areas = []
        target = unique[-1]
        segment_pix = np.where(result_image == target)
        for i in range(0, len(segment_pix[0])):
            
            row = segment_pix[0][i]
            col = segment_pix[1][i]
            
            if row <= half:
                y_min = 0
                y_max = size
            if row >= half:
                y_min = row - half
                if row + half >= y_shape:
                    y_max = y_shape
                    y_min = y_max - size
                else: 
                    y_max = row + half
                
            if col <= half:
                x_min = 0
                x_max = size
            if col >= half:    
                x_min = col - half
                if col + half >= x_shape:
                    x_max = x_shape
                    x_min = x_max - size
                else:
                    x_max = col + half
            
            bounds = [y_min, y_max, x_min, x_max]
            test_areas.append(bounds)
            
        test_areas_set = set(tuple(x) for x in test_areas)
        test_areas = [list(x) for x in test_areas_set]
        
        return test_areas
    
        
    def detect(self, im_tensor):
        """
        
        Parameters
        ----------
        im_tensor : Array of uint8
            DESCRIPTION: 3-D array version of input image

        Returns
        -------
        coordinates : List
            DESCRIPTION: Employing the moving window function, the windows that have > prediction threshold values
            are converted into coordinates that reference position in image array and stored in a list.

        """
        step = self.step
        coordinates = []
        im_height = im_tensor.shape[1]
        im_width = im_tensor.shape[2]
        if self.import_model:
            model = keras.models.load_model(self.import_model)
        else:
            model = self.train_model.execute()
        
        print('\n >>> CONDUCTING OBJECT DETECTION')
        
        if self.segmentation:
            test_areas = self.k_means_segmentation(im_tensor)
            for idx, i in enumerate(test_areas):
                sys.stdout.write('\r{}%  '.format(round(idx / len(test_areas) * 100, 1)))
                x = int(i[2])
                y = int(i[0])
                window = self.moving_window(x, y, im_tensor)
                if np.count_nonzero(window) == 0:
                    x += 20
                else:
                    result = model.predict(window)
                    if result[0][1] > self.prediction_threshold:
                        if np.count_nonzero(window) > self.mask_threshold:
                            x_min = x*step
                            y_min = y*step
                            
                            x_max = x_min + 20
                            y_max = y_min + 20
                            
                            coords = [[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]]
                            
                            coordinates.append([coords, result])
        else:                    
            for y in range(int((im_height-(20-step))/step)):
                for x in range(int((im_width-(20-step))/step)):
                    window = self.moving_window(x*step, y*step, im_tensor)
                    if np.count_nonzero(window) == 0:
                        x += 20
                    else:
                        result = model.predict(window)
                        sys.stdout.write('\r     {}%'.format(round(y*step/im_height*100, 1)))
                        if result[0][1] > self.prediction_threshold:
                            if np.count_nonzero(window) > self.mask_threshold:
                                x_min = x*step
                                y_min = y*step
                                
                                x_max = x_min + 20
                                y_max = y_min + 20
                                
                                coords = [[x_min, y_min], [x_max, y_min], [x_max, y_max], [x_min, y_max]]
                                
                                coordinates.append([coords, result])
        print('\n     Complete')            
        return coordinates
        
        
    def trim_results(self, coordinates):
        """
        
        Parameters
        ----------
        coordinates : List
            DESCRIPTION: Collection of image array coordinates that a positive predictions for object detection

        Returns
        -------
        trimmed_coordinates : List
            DESCRIPTION: Coordinates are analyzed relative to overlapping coordinate bounds. The highest prediction value
            is kept and the others are discarded. 

        """
        print('\n >>> TRIMMING RESULTS')
        deleted_items = []
        for (i, item) in enumerate(coordinates):
            poly_i = Polygon(item[0])
            for (j, comp) in enumerate(coordinates):
                if abs(item[0][0][0] - comp[0][0][0]) < 20:
                    poly_j = Polygon(comp[0])
                    intersection = poly_i.intersection(poly_j)
                    # boxes are 20x20, so a full overlap has area 400
                    iou = intersection.area / 400

                    if iou > 0.1 and item != comp:
                        # Keep the higher-scoring prediction of the overlapping pair
                        if item[1][0][1] > comp[1][0][1]:
                            deleted_items.append(comp)
                        else:
                            deleted_items.append(item)
        # Filter once, after all comparisons, so trimmed_coordinates is
        # defined even when no boxes overlap
        trimmed_coordinates = [e for e in coordinates if e not in deleted_items]
        print('\n     Complete')
        return trimmed_coordinates
        
        
        
    def create_geo_output(self, trimmed_coordinates, left, top, im_x_res, im_y_res, crs):
        """
        
        Parameters
        ----------
        trimmed_coordinates : List
            DESCRIPTION: Final collection of positive predictions

        Returns
        -------
        wkt_strings : List
            DESCRIPTION: Image array coordinates are transformed back to original geographic coordinates of input image.
            Values are stored as strings that are capable of being loaded as geographic features in GIS software.

        """
        print('\n >>> CREATING GEOGRAPHIC OUTPUT')
        wkt_strings = []
        for i in trimmed_coordinates:
            x_min = (i[0][0][0] * im_x_res) + left
            y_min = top - (i[0][0][1] * im_y_res)
            x_max = (i[0][1][0] * im_x_res) + left
            y_max = top - (i[0][2][1] * im_y_res)
            
            wkt = 'POLYGON (({} {}, {} {}, {} {}, {} {}, {} {}))'.format(
                x_min, y_min, x_max, y_min, x_max, y_max, x_min, y_max, x_min, y_min)
            wkt_strings.append(wkt)
        print('\n     Complete')    
        return wkt_strings
        
        
    
    def paint_box(self, x, y, border_width, im_tensor):   
        """
        
        Parameters
        ----------
        x : Integer
            DESCRIPTION: x position of final coordinates predictions
        y : Integer
            DESCRIPTION y position of final coordinates predictions
        border_width : Integer
            DESCRIPTION: Input for visual border thickness to be "painted" on image
        im_tensor : Array of uint8
            DESCRIPTION: 3-D array version of input image

        Returns
        -------
        None, this method "paints" bounding boxes on original image to display prediction results

        """
        for i in range(self.width):
            for ch in range(3):
                for th in range(border_width):
                    im_tensor[ch][y+i][x-th] = -1
    
        for i in range(self.width):
            for ch in range(3):
                for th in range(border_width):
                    im_tensor[ch][y+i][x+th+20] = -1
            
        for i in range(self.width):
            for ch in range(3):
                for th in range(border_width):
                    im_tensor[ch][y-th][x+i] = -1
            
        for i in range(self.width):
            for ch in range(3):
                for th in range(border_width):
                    im_tensor[ch][y+th+20][x+i] = -1
    
    
    def show_results(self, trimmed_coordinates, im_tensor):
        """
        
        Parameters
        ----------
        trimmed_coordinates : List
            DESCRIPTION: Final collection of positive predictions
        im_tensor : Array of uint8
            DESCRIPTION: 3-D array version of input image

        Returns
        -------
        None, this method executes the paint_box method and plots the results

        """
        print('\n >>> SHOWING RESULTS')
        
        for e in trimmed_coordinates:
            try:
                self.paint_box(e[0][0][0], e[0][0][1], 2, im_tensor)
            except IndexError:
                pass
        
        im_tensor = im_tensor.transpose(1, 2, 0)
        plt.figure(1, figsize = (15, 30))
        
        plt.subplot(3, 1, 1)
        plt.imshow(im_tensor)
        print('\n     Check Your Plots')
        plt.show()
        
        
    def execute(self):
        print('\n ~~~EXECUTION IN-PROGRESS~~~')
        im_tensor, left, top, im_x_res, im_y_res, crs = self.create_tensor()
        im_tensor = im_tensor.transpose(2,0,1)
        coordinates = self.detect(im_tensor)
        trimmed_coordinates = self.trim_results(coordinates)
        wkt_strings = self.create_geo_output(trimmed_coordinates, left, top, im_x_res, im_y_res, crs)
        self.show_results(trimmed_coordinates, im_tensor)
        
        print('\n >>> EXPORTING DATA')
        df = pd.DataFrame(wkt_strings, columns=['wkt'])
        df.to_csv(self.out_dir + 'obj_det_wkt-{}_{}.csv'.format(crs, date.today().strftime('%Y%m%d')))
        print('\n ~~~EXECUTION COMPLETE~~~')
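For reference, a minimal sketch of loading the WKT strings written by create_geo_output back into geometries (shapely is already a dependency here via Polygon):

from shapely import wkt

poly = wkt.loads('POLYGON ((0 0, 20 0, 20 20, 0 20, 0 0))')
print(poly.bounds)  # (0.0, 0.0, 20.0, 20.0)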
Example #17
import os
import sys
import numpy as np
#sys.path.insert(0, '/home/ec2-user/spell_correction_keras/code/*')
sys.path.insert(0, '/workspace/spell_correction_keras/code/*')
from train_model import TrainModel

inverse = False

model_helper = TrainModel(inverse)
model = model_helper.create_model()

has_model = sum(
    file.startswith('weight')
    for file in os.listdir('/home/ec2-user/spell_correction_keras/data/')
)
if has_model == 0:
    model_helper.train_model(model)
else:
    #filename = '/home/ec2-user/spell_correction_keras/data/weights-improvement-00-1.0991.hdf5'
    filename = '/workspace/spell_correction_keras/data/weights-improvement-00-1.0991.hdf5'
    model.load_weights(filename)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

results = model.predict(model_helper.x_test)
print(results[0:2])
predictions = [[np.argmax(word) for word in sentence] for sentence in results]
sums = [[sum(word) for word in sentence] for sentence in results]
print('arg data: ', predictions)
Example #18
#!/usr/bin/env python3

import numpy as np
import re
from train_model import TrainModel
from utils import load_images

images, filenames = load_images('HBTNaligned', as_array=True)
images = images.astype('float32') / 255

identities = [re.sub('[0-9]', '', f[:-4]) for f in filenames]
print(set(identities))
thresholds = np.linspace(0.05, 1, 96)
tm = TrainModel('models/face_verification.h5', 0.2)
print(tm.summary())
tau, f1, acc = tm.best_tau(images, identities, thresholds)
print(tau)
print(f1)
print(acc)
Example #19
# NOTE: the function header was truncated in this example; it is reconstructed
# from the init_exp_dir() call in the __main__ block below.
def init_exp_dir():
    global args

    # Create root save model directory if it doesn't exist
    if not os.path.exists(EXPERIMENTS_DIR):
        os.mkdir(EXPERIMENTS_DIR)

    # Create new experiment if exp number was not specified
    if args['exp_no'] == -1:
        increm_experiment_dir()

    exp_dir = os.path.join(EXPERIMENTS_DIR, str(args['exp_no']))
    if not os.path.exists(exp_dir):
        os.mkdir(exp_dir)
        save_args()
    else:
        load_args()


if __name__ == '__main__':
    if args['test']:
        model = TestModel(args['exp_no'], args['cpu'], args['device'])
        model.test()
    else:
        init_exp_dir()
        if args['model'] == 'xgboost':
            model = XGBoostTrainer(**args)
            model.train()
        else:
            model = TrainModel(**args)
            model.train()
Example #20
#!/usr/bin/env python3

import numpy as np
from train_model import TrainModel

tm = TrainModel('models/face_verification.h5', 0.2)

np.random.seed(0)
y_true = np.random.randint(0, 2, 10)
y_pred = np.random.randint(0, 2, 10)
print(tm.f1_score(y_true, y_pred))
print(tm.accuracy(y_true, y_pred))
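Assuming f1_score and accuracy implement the standard binary metrics, the output can be cross-checked against scikit-learn:

from sklearn.metrics import accuracy_score, f1_score

print(f1_score(y_true, y_pred))        # should match tm.f1_score
print(accuracy_score(y_true, y_pred))  # should match tm.accuracy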
Example #21
from dataset import Dataset
from train_model import TrainModel


T1_path = 'Data/t1_icbm_normal_1mm_pn0_rf0.mnc'
T2_path = 'Data/t2_icbm_normal_1mm_pn0_rf0.mnc'
PD_path = 'Data/pd_icbm_normal_1mm_pn0_rf0.mnc'
GT_path = 'Data/phantom_1.0mm_normal_crisp.mnc'

# Initialize instance for input data
ds = Dataset()

ds.load_data(T1_path, 'T1', norm=True)
ds.load_data(T2_path, 'T2', norm=True)
ds.load_data(PD_path, 'PD', norm=True)
ds.load_data(GT_path, 'GT')

# Generate training and validation dataset
CSF_mask = ds.get_mask(ds.GT, values=1, label=1)
GM_mask = ds.get_mask(ds.GT, values=[2, 8], label=2)
WM_mask = ds.get_mask(ds.GT, values=3, label=3)
GT_mask = CSF_mask + GM_mask + WM_mask

ds.group_data(GT_mask)

# Training the model
tm = TrainModel(ds)
tm.train_model(epochs=15, iters=50,
               batch_size=500, learning_rate=3e-4)
Example #22
#!/usr/bin/env python3

import numpy as np
import tensorflow as tf
from train_model import TrainModel
from utils import load_images, load_csv, generate_triplets

images, filenames = load_images('HBTNaligned', as_array=True)
images = images.astype('float32') / 255

triplet_names = load_csv('FVTriplets.csv')
triplets = generate_triplets(images, filenames, triplet_names)

tm = TrainModel('models/face_verification.h5', 0.2)
tm.training_model.summary()
losses = tm.training_model.predict(triplets, batch_size=1)
print(losses.shape)
print(np.mean(losses))
Example #23
    def train_new_model():
        run = True
        COLOR_INACTIVE = pygame.Color('lightskyblue3')
        COLOR_ACTIVE = pygame.Color('dodgerblue2')

        class InputBox:
            def __init__(self, x, y, w, h, text=''):
                self.rect = pygame.Rect(x, y, w, h)
                self.color = COLOR_INACTIVE
                self.text = text
                self.txt_surface = FONT.render(text, True, self.color)
                self.active = False

            def handle_event(self, event):
                nonlocal run
                if event.type == pygame.MOUSEBUTTONDOWN:
                    # If the user clicked on the input_box rect.
                    if self.rect.collidepoint(event.pos):
                        # Toggle the active variable.
                        self.active = not self.active
                    else:
                        self.active = False
                    # Change the current color of the input box.
                    self.color = COLOR_ACTIVE if self.active else COLOR_INACTIVE
                if event.type == pygame.KEYDOWN:
                    if self.active:
                        # if event.key == pygame.K_RETURN:
                        #     name = self.text
                        #     run = False

                        if event.key == pygame.K_BACKSPACE:
                            self.text = self.text[:-1]
                        else:
                            self.text += event.unicode
                        # Re-render the text.
                        self.txt_surface = FONT.render(self.text, True,
                                                       self.color)

            def update(self):
                # Resize the box if the text is too long.
                width = max(200, self.txt_surface.get_width() + 10)
                self.rect.w = width

            def draw(self, screen):
                # Blit the text.
                screen.blit(self.txt_surface,
                            (self.rect.x + 5, self.rect.y + 5))
                # Blit the rect.
                pygame.draw.rect(screen, self.color, self.rect, 2)

        name = InputBox(W // 2 - 100, 150, 800, 40, 'network')
        outputs_box = InputBox(W // 2 - 100, 300, 800, 40)
        imgs_box = InputBox(W // 2 - 100, 450, 800, 40, '500')
        error_str = ''
        # Bind these in the enclosing scope so the nonlocal declarations in
        # collected() below are valid
        model_name, outputs, num_imgs = None, None, None

        def collected():
            nonlocal error_str
            nonlocal run
            nonlocal model_name, outputs, num_imgs
            num_img_str = imgs_box.text
            name_str = name.text
            outputs_str = outputs_box.text

            def RepresentsInt(s):
                try:
                    int(s)
                    return True
                except ValueError:
                    return False

            models = get_models()

            if name_str == '' or num_img_str == '' or outputs_str == '':
                error_str = "Fill in all input boxes"
            elif ' ' in name_str:
                error_str = 'Name can\'t contain spaces'
            elif not RepresentsInt(outputs_str) or not RepresentsInt(
                    num_img_str):
                error_str = 'Fill in gaps with correct type'
            elif name_str + '.pth' in models:
                error_str = "This name is already used"
            else:
                error_str = ''
                outputs = int(outputs_str)
                num_imgs = int(num_img_str)
                model_name = name_str
                run = False

        def go_back():
            import main
            main.main()

        text = FONT.render('Name your model', 1, (255, 0, 0))
        text2 = FONT.render('num outputs', 1, (255, 0, 0))
        text3 = FONT.render('num imgs', 1, (255, 0, 0))

        while run:
            CLOCK.tick(FPS)
            pygame.display.update()
            WIN.fill(0)
            create_button('<--', (128, 128, 128), (200, 200, 200),
                          30,
                          30,
                          50,
                          50,
                          action=go_back)
            WIN.blit(text, (W // 2 - text.get_width() // 2, 100))
            WIN.blit(text2, (W // 2 - text2.get_width() // 2, 250))
            WIN.blit(text3, (W // 2 - text3.get_width() // 2, 400))
            if error_str != '':
                text4 = FONT.render(error_str, 1, (0, 0, 255))
                WIN.blit(text4, (W // 2 - text4.get_width() // 2, 50))
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False
                    sys.exit()
                name.handle_event(event)
                outputs_box.handle_event(event)
                imgs_box.handle_event(event)

            outputs_box.update()
            outputs_box.draw(WIN)
            name.update()
            name.draw(WIN)
            imgs_box.update()
            imgs_box.draw(WIN)

            create_button('Continue', (0, 128, 0), (0, 255, 0),
                          W // 2 - 200,
                          H - 250,
                          400,
                          80,
                          action=collected)

        network = Network(outputs)
        CollectFramesModule(outputs, num_imgs)
        TrainModel(outputs, network, model_name)
        models = get_models()
        index = models.index(model_name + '.pth')
        Predict(model_name + '.pth', index)