# Esempio n. 1 (Example 1) — scraped snippet separator; score: 0
import config_etc

import os  # was missing: os.environ is used below before any `import os`
import scipy.misc

# Pin CUDA device enumeration to PCI bus order and expose only GPU 0.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def get_shape(text, input):
    """Print the runtime shape of a TensorFlow tensor, prefixed by *text*.

    The parameter name `input` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    # Context manager guarantees the session is released even if
    # sess.run raises (the original leaked the session on error).
    with tf.Session() as sess:
        print("{}shape : {}".format(text, sess.run(tf.shape(input))))


dataG = DataGen()

# Load input images and foreground label masks as numpy arrays.
# NOTE(review): `np` and `DataGen` are not imported before this point in
# the file (imports appear further down) — this fragment looks like a
# mangled concatenation; confirm import order before running.
rgb_images = np.array(dataG.load_images())
fg_images = np.array(dataG.load_labels())

# Remap label value 0 to 32 because pydensecrf does not accept label 0.
# (The original comment said "change 0 value to 1", but the code writes 32.)
fg_images = np.where(fg_images == 0, 32, fg_images)

# Append a trailing channel axis: (N, H, W) -> (N, H, W, 1).
fg_images = np.reshape(
    fg_images, [fg_images.shape[0], fg_images.shape[1], fg_images.shape[2], 1])

print("rgb_images : " + str(np.shape(rgb_images)))
print("fg_images : " + str(np.shape(fg_images)))
import os
from DataGen import DataGen
from placeHolders import placeHolders
import config_etc
import matplotlib.pyplot as plt
import scipy.misc
import datetime
import time

def current_milli_time():
    """Return the current Unix time in whole milliseconds."""
    # PEP 8 (E731): use `def` instead of assigning a lambda to a name.
    return int(round(time.time() * 1000))


# Pin CUDA device enumeration to PCI bus order and expose only GPU 1.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

## Prepare the dataset.
dataG = DataGen()

# Pull images and ground-truth labels from the generator as numpy arrays.
rgb_images = np.array(dataG.load_images())
fg_images = np.array(dataG.load_labels())

# Append a singleton channel axis: (N, H, W) -> (N, H, W, 1).
fg_images = fg_images[:, :, :, np.newaxis]

print("rgb_images : {}".format(np.shape(rgb_images)))
print("fg_images : {}".format(np.shape(fg_images)))

# Create the input placeholders and feed both arrays in.
ph = placeHolders(input_images=rgb_images, input_labels=fg_images)
# Esempio n. 3 (Example 3) — scraped snippet separator; score: 0
    def trainLSTM(self, train, test):
        '''
        Training, Testing and Evaluation of LSTM.

        Args:
            train: iterable of identifiers accepted by
                datagen.DataCreatorSequence(t, 'train').
            test: iterable of identifiers accepted by
                datagen.DataCreatorSequence(t, 'test').

        Side effects:
            Appends to self.loss_values and self.test_loss; writes
            self.training_auc[epoch] and self.test_auc[epoch].

        Fixes over the previous revision: every `x / count` style average
        is guarded, so an epoch where no batch produces a usable AUC (or
        no batch at all) no longer raises ZeroDivisionError, and the
        per-file test-loss append no longer crashes (or logs a stale
        value) when a file yields no batches.
        '''
        datagen = DataGen(self.data_file_name_train, self.data_file_name_test)
        rolling_loss = 0  # running loss across all epochs, for smoothed logging
        loss_count = 0
        for epoch in range(self.num_epochs):
            train_auc = 0
            count = 0  # number of batches that produced a positive AUC
            for i, t in enumerate(train):
                XX, yy = datagen.DataCreatorSequence(t,
                                                     'train')  # hdf_file_path
                # Process in mini-batches of 100 sequences.
                for j in range(0, XX.shape[0], 100):
                    X, y_true = XX[j:(j + 100), :, :], yy[j:(j + 100), :, :]
                    if X.shape[0] == 0:
                        continue
                    inputs = Variable(torch.FloatTensor(X).to(self.device))
                    # y_true is one-hot along axis 2: collapse to class
                    # indices and flatten (batch, seq) -> (batch*seq,)
                    # to match the LSTM criterion.
                    y = np.argmax(y_true, axis=2)
                    y_oh = Variable(
                        torch.LongTensor(y.reshape(
                            y.shape[0] * y.shape[1])).to(self.device))
                    self.optimizer.zero_grad()
                    # Forward + Backward + Optimize
                    outputs = self.model(inputs)
                    loss = self.criterion_lstm(outputs, y_oh)
                    loss.backward()
                    self.optimizer.step()
                    # Flatten the one-hot targets to 2-D to match outputs.
                    y_true = y_true.reshape((y.shape[0] * y_true.shape[1], -1))
                    auc = self.GetAUCScore(y_true,
                                           outputs.data.cpu().numpy(),
                                           self.concerned_class)
                    rolling_loss += loss.item()
                    loss_count += 1
                    # NOTE(review): assumes GetAUCScore returns <= 0 when
                    # the AUC is undefined for the batch — confirm.
                    if auc > 0:
                        train_auc += auc
                        count += 1
                if (i % 100) == 0:
                    # Guard: loss_count is 0 until the first batch runs.
                    avg_loss = float(rolling_loss / loss_count) if loss_count > 0 else 0.0
                    self.loss_values.append(avg_loss)
                    print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f, Roc: %.4f' % (epoch+1, self.num_epochs, \
                            i+1, len(train), avg_loss, (train_auc/count) if count > 0 else 0))
            # Guard: count may be 0 if no batch produced a positive AUC.
            self.training_auc[epoch] = float(train_auc / count) if count > 0 else 0.0

            # ---- Evaluation pass (no optimizer steps) ----
            count = 0
            count1 = 0
            test_auc_val1 = 0
            test_auc_val = 0
            loss = None  # last batch loss for the current file, if any
            for i, t in enumerate(test):
                XX, yy = datagen.DataCreatorSequence(t,
                                                     'test')  # hdf_file_path
                for j in range(0, XX.shape[0], 100):
                    X, y_true = XX[j:(j + 100), :, :], yy[j:(j + 100), :, :]
                    if X.shape[0] == 0:
                        continue
                    inputs = Variable(torch.FloatTensor(X).to(self.device))
                    y = np.argmax(y_true, axis=2)
                    y_oh = Variable(
                        torch.LongTensor(y.reshape(
                            y.shape[0] * y.shape[1])).to(self.device))
                    # Forward only; gradients are never applied here.
                    outputs = self.model(inputs)
                    loss = self.criterion_lstm(outputs, y_oh)
                    y_true = y_true.reshape((y.shape[0] * y_true.shape[1], -1))
                    # AUC for the configured class, and separately for the
                    # transition class (label 1).
                    auc = self.GetAUCScore(y_true,
                                           outputs.data.cpu().numpy(),
                                           self.concerned_class,
                                           is_transition=False,
                                           save_results=True)
                    auc1 = self.GetAUCScore(y_true,
                                            outputs.data.cpu().numpy(),
                                            1,
                                            is_transition=True,
                                            save_results=True)
                    if auc1 > 0:
                        test_auc_val1 += auc1
                        count1 += 1
                    if auc > 0:
                        test_auc_val += auc
                        count += 1
                # Record the last batch loss for this file; skip files that
                # produced no batches (the original could crash on an
                # unbound `loss` or log a stale value here).
                if loss is not None:
                    self.test_loss.append(float(loss.item()))
                    loss = None
            print('Test AUC 46: ', float(test_auc_val / count) if count > 0 else 0.0)
            print('Test AUC 1: ', float(test_auc_val1 / count1) if count1 > 0 else 0.0)
            print("Test Loss: ", self.test_loss)
            self.test_auc[epoch] = float(test_auc_val / count) if count > 0 else 0.0
# Esempio n. 4 (Example 4) — scraped snippet separator; score: 0
    def trainFF(self, train, test):
        '''
        Training, testing and evaluation of a feed forward network.

        Args:
            train: iterable of identifiers accepted by
                datagen.GenerateIndependentData(t, 'train').
            test: iterable of identifiers for the 'test' split.

        Side effects:
            Appends to self.loss_values and self.test_loss; writes
            self.training_auc / self.training_accuracy and
            self.test_auc / self.test_accuracy for each epoch.

        Fixes over the previous revision: `loss.data[0]` (which raises
        IndexError on 0-dim tensors in PyTorch >= 0.4; the rest of this
        file already uses `.item()`) replaced with `loss.item()`, and
        the average computations are guarded against division by zero.
        '''
        datagen = DataGen(self.data_file_name_train, self.data_file_name_test)
        for epoch in range(self.num_epochs):
            correct = 0
            total = 0
            count = 0  # number of steps that produced a usable AUC
            train_auc = 0
            for i, t in enumerate(train):
                x, y_true = datagen.GenerateIndependentData(t, 'train')
                x = Variable(torch.FloatTensor(x).to(self.device))
                # y_true is one-hot along axis 1: convert to class indices.
                y = Variable(
                    torch.LongTensor(np.argmax(y_true,
                                               axis=1)).to(self.device))
                # Forward + Backward + Optimize
                self.optimizer.zero_grad()
                outputs = self.model(x)

                _, predicted = torch.max(outputs.data, 1)
                total += y.size(0)
                correct += (predicted == y.data).sum().item()

                loss = self.criterion(outputs, y)
                loss.backward()
                self.optimizer.step()

                auc_score = self.GetAUCScore(y_true,
                                             outputs.data.cpu().numpy(),
                                             self.concerned_class)
                if auc_score != 0:
                    train_auc += auc_score
                    count += 1

                if (i + 1) % 200 == 0:
                    self.loss_values.append(float(loss.item()))
                    # loss.item() replaces the deprecated loss.data[0].
                    print ('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f, Accuracy: %.4f, AUC: %.4f' % (epoch+1, self.num_epochs, i+1, len(train), \
                    loss.item(), (correct/total) if total > 0 else 0, (train_auc/count) if count > 0 else 0))
            # Guards: count/total are 0 when the training iterable is empty
            # or no step produced a usable AUC.
            self.training_auc[epoch] = float(train_auc / count) if count > 0 else 0.0
            self.training_accuracy[epoch] = float(correct / total) if total > 0 else 0.0

            # ---- Evaluation pass (no optimizer steps) ----
            total = 0
            correct = 0
            count = 0
            test_auc = 0
            for i, t in enumerate(test):
                x, y_true = datagen.GenerateIndependentData(t, 'test')
                x = Variable(torch.FloatTensor(x).to(self.device))
                y = Variable(
                    torch.LongTensor(np.argmax(y_true,
                                               axis=1)).to(self.device))
                outputs = self.model(x)
                loss = self.criterion(outputs, y)

                _, predicted = torch.max(outputs.data, 1)
                total += y.size(0)
                correct += (predicted == y).sum().item()

                auc_score = self.GetAUCScore(y_true,
                                             outputs.data.cpu().numpy(),
                                             self.concerned_class)
                if auc_score != 0:
                    count += 1
                    test_auc += auc_score
                self.test_loss.append(float(loss.item()))
            print('Test AUC: ', float(test_auc / count) if count > 0 else 0.0)
            print(self.test_loss)
            self.test_accuracy[epoch] = float(correct / total) if total > 0 else 0.0
            self.test_auc[epoch] = float(test_auc / count) if count > 0 else 0.0
# Esempio n. 5 (Example 5) — scraped snippet separator; score: 0
from DataGen import DataGen
from Plotter import Plotter
from Fitter import Fitter
from StatAnalyser import StatAnalyser
import math


def sin(x, amplituda):
    """Return a list with amplituda * sin(v) for every value v in x."""
    return [amplituda * math.sin(v) for v in x]


d = DataGen()
print '****************************Funkcja sinus*****************************'

while True:
    std = raw_input(
        'Aby uruchomic program z danymi domyslnymi nacisnij 1\nJesli chcesz wprowadzic wlasne dane nacisnij ENTER \n'
    )
    if (std == ''):
        try:
            pom_okr = float(
                raw_input("Wprowadz ilosc przeprowadzonych pomiarow \n"))
            okr = int(
                raw_input(
                    "Wprowadz ilosc okresow funkcji sin(PELNE OKRESY) \n"))
            if (pom_okr < okr):
                raise ValueError
            zak = float(raw_input("Wprowadz wartosc zaklocen\n"))
# Esempio n. 6 (Example 6) — scraped snippet separator; score: 0
# RNG seeds tried in past experiments, kept for reproducibility notes.
# seed_num = 2748152
# seed_num = 110628


# seed_num = 7584732 # Case 2, good seednum
# seed_num = 5647597 # Case 2, good seednum
# seed_num = 1551761 # for N = 10000, T = 3000
# seed_num = 7576682 # for N = 20000, T = 5000
# seed_num = 6597310 # for N = 20000, T = 5000 (Case 3)

# seed_num = 5391860 # Case 3



# Generating observation data.
# NOTE(review): D, N, seed_num, StoExp, JZ_bound and np are defined
# elsewhere (this is a fragment) — verify they are in scope.
datagen = DataGen(D,N,seed_num)
OBS = datagen.obs_data()

# Fit behavior policies on the observational data, predicting Z from X
# (Logit / XGB — presumably logistic regression and XGBoost; confirm
# against the StoExp implementation).
stoexp = StoExp(D)
JZ = JZ_bound()
[X,Y,Z] = JZ.sepOBS(OBS,D)
obslogit = stoexp.Logit(X,Z)
obsxgb = stoexp.XGB(X,Z)

policy_list = [obslogit,obsxgb]

# Bound construction: [lower, upper] bounds for the logit policy, plus the
# mean outcome under interventional data generated for that policy.
JZ = JZ_bound()
[L_obslogit, H_obslogit] = JZ.JZ_bounds(obslogit,OBS,D,N)
Y_obslogit = np.mean(datagen.poly_intv_data(obslogit, Z)['Y'])
import pandas as pd

# Sample submission: shows the expected output format.
sample_output = pd.read_csv('sample_submission.csv', sep=",")
sample_output.head()  # NOTE(review): return value discarded — notebook leftover?

# Data types for each column.
print(sample_output.dtypes)

# Loading and exploring train data.
# Column names on the train.csv file and corresponding data types:
from DataGen import DataGen
train_data_generator = DataGen(file_path='train.csv', chunk_size=10000000)
print(train_data_generator.col_names)
print('------------------------------------')

# Pull the first chunk of the training file.
next_batch, end_of_file = train_data_generator.next_batch()

# Report whether the "time_to_failure" column contains duplicated values.
print('Duplicates in "time_to_failure"? {}'.format(
    not next_batch.loc[next_batch['time_to_failure'].duplicated()].empty))

next_batch.head()  # NOTE(review): return value discarded — notebook leftover?
# Data types for each column.
print(next_batch.dtypes)
print('------------------------------------')
print('Duplicates in "time_to_failure"? {}'.format(
    not next_batch.loc[next_batch['time_to_failure'].duplicated()].empty))

import matplotlib.pyplot as plt
# plot second batch of data and zoom
# Esempio n. 8 (Example 8) — scraped snippet separator; score: 0
from DataGen import DataGen
from Plotter import Plotter
from Fitter import Fitter
from StatAnalyser import StatAnalyser
import math

def sin(x, amplituda):
    """Return a list with amplituda * sin(v) for every value v in x."""
    return [amplituda * math.sin(v) for v in x]
d = DataGen()
print '****************************Funkcja sinus*****************************'

while True:
	std = raw_input('Aby uruchomic program z danymi domyslnymi nacisnij 1\nJesli chcesz wprowadzic wlasne dane nacisnij ENTER \n')
	if ( std == ''):
		try:
    			pom_okr = float(raw_input("Wprowadz ilosc przeprowadzonych pomiarow \n"))
    			okr = int(raw_input("Wprowadz ilosc okresow funkcji sin(PELNE OKRESY) \n"))
			if (pom_okr<okr):
				raise ValueError 
    			zak = float(raw_input("Wprowadz wartosc zaklocen\n"))
    			am = float(raw_input("Wprowadz amplitude\n"))
    			przes = float(raw_input("Wprowadz przesuniecie\n"))
			d.setData(przes,okr,pom_okr,am,zak)
			break
		except ValueError:
			print 'Wprowadzono niepoprawne dane - sprobuj jeszcze raz \n'
	elif(std== str(1)):
			break
# Esempio n. 9 (Example 9) — scraped snippet separator; score: 0
def main():
    """Build and train the image-deblurring model.

    NOTE(review): depends on module-level names defined elsewhere in the
    file (img_rows, img_cols, num_of_dim, label_rows, label_cols,
    batch_size, kernel_crop, path_sharp, path_blur, model_path, epochs,
    path_weights, regr_loss, regr_shape, identity_loss, plus keras /
    DataGen / ShortCutNet / listdir / isfile / join imports) — confirm
    they are in scope.
    """

    # Parameters
    params = {
        'img_rows': img_rows,
        'img_cols': img_cols,
        'num_of_dim': num_of_dim,
        'label_rows': label_rows,
        'label_cols': label_cols,
        'batch_size': batch_size,
        'kernel_crop': kernel_crop,
        'shuffle': True
    }

    # Names of all sharp (ground-truth) image files found on disk.
    name_read = [x for x in listdir(path_sharp) if isfile(join(path_sharp, x))]

    # Generator yielding training batches from the sharp/blur directories.
    train_generator = DataGen(**params).Generator(name_read, path_sharp,
                                                  path_blur)

    input_dim_blur = (img_rows, img_cols, num_of_dim)
    input_dim_sharp = (label_rows, label_cols, num_of_dim)

    DeblurNet = ShortCutNet().DeblurResidualNet(input_dim_blur, 6)
    #DeblurNet = ShortCutNet().DeblurSHCNet(input_dim_blur, 17)
    DeblurNet.summary()

    input_blur = Input(shape=(input_dim_blur))
    input_sharp = Input(shape=(input_dim_sharp))

    img_deblur = DeblurNet(input_blur)

    # Loss
    # The regression loss is computed inside the graph by a Lambda layer,
    # so the model's "output" is the loss itself and identity_loss below
    # just passes it through to the optimizer.
    loss_regr = Lambda(regr_loss,
                       output_shape=regr_shape)([img_deblur, input_sharp])

    # Model
    model = Model(inputs=[input_blur, input_sharp], outputs=loss_regr)

    # train
    model.compile(loss=identity_loss, optimizer=keras.optimizers.Adadelta())
    model.summary()

    #path_model_save = model_path + 'DeblurRes_{epoch:02d}-{loss:.2f}.hdf5'
    # NOTE(review): checkpoint name says "DeblurSHC" but the active network
    # above is DeblurResidualNet — confirm which name is intended.
    path_model_save = model_path + 'DeblurSHC_{epoch:02d}-{loss:.2f}.hdf5'
    check_point = keras.callbacks.ModelCheckpoint(path_model_save,
                                                  monitor='loss',
                                                  verbose=0,
                                                  save_best_only=False,
                                                  save_weights_only=False,
                                                  mode='auto',
                                                  period=1)
    callbacks = [check_point]

    # Train the model on the dataset
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=int(len(name_read) / batch_size),
                        epochs=epochs,
                        callbacks=callbacks,
                        use_multiprocessing=True,
                        verbose=1)

    # Persist the trained model (architecture + weights).
    model.save(path_weights)