Example #1
def __init__(self,
             train=True,
             common_params=None,
             solver_params=None,
             net_params=None,
             dataset_params=None):
    if common_params:
        self.device_id = int(common_params['gpus'])
        self.image_size = int(common_params['image_size'])
        self.height = self.image_size
        self.width = self.image_size
        self.batch_size = int(common_params['batch_size'])
        self.num_gpus = 1
        self.restore_model = True
        self.logs_dir = "logs/"
        self.data_dir = "Data_zoo/flowers"
    if solver_params:
        self.learning_rate = float(solver_params['learning_rate'])
        self.moment = float(solver_params['moment'])
        self.max_steps = int(solver_params['max_iterators'])
        self.train_dir = str(solver_params['train_dir'])
        self.lr_decay = float(solver_params['lr_decay'])
        self.decay_steps = int(solver_params['decay_steps'])
    self.train = train
    self.net = Net(train=train,
                   common_params=common_params,
                   net_params=net_params)
    # self.dataset = DataSet(common_params=common_params, dataset_params=dataset_params)
    self.train_images, self.test_images = flowers.read_dataset(self.data_dir)
    image_options = {"resize": True, "resize_size": 224, "color": "LAB"}
    self.batch_reader = dataset.BatchDatset(self.train_images, image_options)
    self.batch_reader_test = dataset.BatchDatset(self.test_images, image_options)
Example #2
def read_test_data(path):
    # Number of test clips to load, passed as the first command-line argument.
    num_clips = int(sys.argv[1])
    labels = pd.read_csv(os.path.join(path, 'Label_Map', 'label.txt'))
    all_clips_name = rd.read_dataset(path, labels, 'Test', seed=66, balance=False)
    mean_image = np.load(os.path.join(path, 'Data', 'Train', 'mean_image.npy'))
    clip_Y, clip_X = rd.read_minibatch(0, num_clips, all_clips_name, mean_image, 'Test')
    return clip_Y, clip_X
Example #3
def get_cnn():
    # Load the trained CNN and build a sub-model that outputs the embedding
    # from its pooling layer.
    model = load_model(
        r'E:\GYK\google_tts\save_model\all_bonafid_split_1s_all_vs_all_SS_1_split_1s_all'
    )
    print(model.layers)
    #dense1_layer_model = Model(inputs=model.input, outputs=model.get_layer('global_average_pooling1d_1').output)
    dense1_layer_model = Model(inputs=model.input,
                               outputs=model.get_layer('pooling').output)
    n = 500  # number of clips sampled from each dataset
    label = []
    TIMIT = read_data.read_dataset(
        r'E:\GYK\google_tts\data\{}'.format('TIMIT_split_1s'))
    print(np.random.choice(range(TIMIT.shape[0]), 500))
    print(TIMIT[1])
    TIMIT = np.array(
        [TIMIT[i] for i in np.random.choice(range(TIMIT.shape[0]), 500)])
    print(TIMIT.shape)
    TIMIT = TIMIT.reshape(-1, TIMIT.shape[1], 1)
    TIMIT = dense1_layer_model.predict(TIMIT)
    label += [1] * TIMIT.shape[0]
    T_Wavenet = read_data.read_dataset(
        r'E:\GYK\google_tts\data\{}'.format('TIMIT_wavnet_split_low2'))
    T_Wavenet = np.array([
        T_Wavenet[i] for i in np.random.choice(range(T_Wavenet.shape[0]), 500)
    ])
    T_Wavenet = T_Wavenet.reshape(-1, T_Wavenet.shape[1], 1)
    T_Wavenet = dense1_layer_model.predict(T_Wavenet)
    label += [2] * T_Wavenet.shape[0]
    Bonafid = read_data.read_dataset(
        r'E:\GYK\google_tts\data\{}'.format('all_bonafid_split_1s'))
    Bonafid = np.array(
        [Bonafid[i] for i in np.random.choice(range(Bonafid.shape[0]), 500)])
    Bonafid = Bonafid.reshape(-1, Bonafid.shape[1], 1)
    Bonafid = dense1_layer_model.predict(Bonafid)
    label += [3] * Bonafid.shape[0]
    ASV_Wavenet = read_data.read_dataset(
        r'E:\GYK\google_tts\data\{}'.format('all_SS_1_split_1s'))
    ASV_Wavenet = np.array([
        ASV_Wavenet[i]
        for i in np.random.choice(range(ASV_Wavenet.shape[0]), 500)
    ])
    ASV_Wavenet = ASV_Wavenet.reshape(-1, ASV_Wavenet.shape[1], 1)
    ASV_Wavenet = dense1_layer_model.predict(ASV_Wavenet)
    label += [4] * ASV_Wavenet.shape[0]
    data = np.vstack((TIMIT, T_Wavenet, Bonafid, ASV_Wavenet))
    return data, label
Example #4
def load_clip_name(path, status, balance):
    labels = pd.read_csv(os.path.join(path, 'Label_Map', 'label.txt'))
    all_clips_name = rd.read_dataset(path,
                                     labels,
                                     status,
                                     seed=66,
                                     balance=balance)
    mean_image = np.load(os.path.join(path, 'Data', 'Train', 'mean_image.npy'))
    return all_clips_name, mean_image
Example #5
    def __init__(self):
        """
            for train and test data, dataset is in form like [[谜面,答案,地信],]
            for valid data, dataset is [[谜面,谜底,选项],]

            variables begin with `raw` are prepared for calculating valid data.
        """
        # make train, valid, test
        self.n_for_1 = 2
        raw_riddles = read_data.read_dataset()  # [[clue, answer, options]]
        random.shuffle(raw_riddles)
        riddles = self.construct_classify_riddle_set(
            raw_riddles)  # [[clue, candidate answer, label]]
        one_tenth = len(riddles) // 10
        raw_one_tenth = len(raw_riddles) // 10
        train = riddles[0:one_tenth * 8]
        test = riddles[one_tenth * 8:one_tenth * 9]
        valid = raw_riddles[raw_one_tenth * 9:raw_one_tenth * 10]

        # get voc
        self.riddle_voc, self.ans_voc = read_data.construct_train_data_corpus_vocabulary_dictionary(
            begin=0, end=one_tenth * 8)
        self.riddle_voc_size = len(
            self.riddle_voc
        ) + 1  # reserve extra indices: one for padding, one for unseen words
        self.ans_voc_size = len(self.ans_voc) + 1
        UNSEEN_riddle = self.riddle_voc_size + 1
        UNSEEN_ans = self.ans_voc_size + 1

        # Convert each riddle's words and answer to vocabulary indices.
        def indexizer(riddle, is_valid=False):
            ret = list()
            ret.append([
                self.riddle_voc[word]
                if word in self.riddle_voc else UNSEEN_riddle
                for word in riddle[0]
            ])
            ret.append(self.ans_voc[riddle[1]] if riddle[1] in
                       self.ans_voc else UNSEEN_ans)
            if is_valid:
                ret.append([
                    self.ans_voc[word] if word in self.ans_voc else UNSEEN_ans
                    for word in riddle[2]
                ])
            else:
                ret.append(riddle[2])
            #print(ret)
            return ret

        self.train = [indexizer(row) for row in train]
        self.valid = [indexizer(row, is_valid=True) for row in valid]
        self.test = [indexizer(row) for row in test]
Example #6
import numpy as np
import read_data
import label
import mymodel
import sklearn.metrics as sm
from keras.utils import np_utils, multi_gpu_model
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import confusion_matrix
from keras.models import load_model
from keras.optimizers import Adam, RMSprop, Adamax
import matplotlib.pyplot as plt

isSplit = False
# load data
# 16,196
# T = "all_bonafid_split_1s"
T = "TIMIT_split_1s"
X_s = read_data.read_dataset(r'D:\GYK\WaveNet\data\{}'.format(T))

# 14,663
F = "TIMIT_WavNet_split_1s"
X_c = read_data.read_dataset(r'D:\GYK\WaveNet\data\{}'.format(F))
X = np.vstack((X_s, X_c))

# create the label file
m = X_s.shape[0]
n = X_c.shape[0]
label.creat_label(m, n)
y = read_data.read_label(r'.\label.txt')

# data preprocess
if isSplit:
    X_train, X_test, y_train, y_test = train_test_split(X,
Example #7
def prepare_experiment(dataset):
    return read_dataset(dataset['format'], dataset['name'], dataset['labels'])
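# Hypothetical usage sketch: the dict keys are taken from the call above, but the
# concrete values are illustrative only and not from the original repo.
# data = prepare_experiment({'format': 'csv', 'name': 'iris', 'labels': 'species'})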
Example #8
    sample = 0
    score = 0
    for i in range(len(pred)):
        sample += 1
        #print(pred[i])
        #print(true[i])
        #print("--------")
        sample_acc = accuracy_score(true[i], pred[i])
        score += sample_acc

    precision = score / sample  # average per-sample accuracy
    return precision


print("> Loading testing data")
x_test, y_test = rd.read_dataset(args.dataset+"/test/*")

### load model
model = load_model(args.model)
# Testing on testing data
print("\nTest on testing data...")
y_pred = model.predict(x_test)
if ml == True:
    threshold = 0.4
    pred = np.zeros(np.array(y_test).shape, dtype=np.int)
    np.set_printoptions(precision=6, suppress=True)
    for i in range(len(y_pred)):
        for j in range(5):
            if y_pred[i][j] >= threshold:
                pred[i][j] = 1
            else:
                pred[i][j] = 0
Example #9
def main(argv=None):
    keep_probability = tf.placeholder(tf.float32, name='keep_probability')
    image = tf.placeholder(tf.float32,
                           shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3],
                           name='input_image')
    annotation = tf.placeholder(tf.int32,
                                shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1],
                                name='annotation')

    pre_annotation, logits = inference(image, keep_prob=keep_probability)
    tf.summary.image('input_image', image, max_outputs=2)
    tf.summary.image('ground_truth', annotation, max_outputs=2)
    tf.summary.image('pre_annotation', pre_annotation, max_outputs=2)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits,
            labels=tf.squeeze(annotation, squeeze_dims=[3]),
            name='entropy'))
    tf.summary.scalar('entropy', loss)

    var_list = tf.trainable_variables()

    train_op = Train_op(loss, var_list)

    print('Setting up summary op...')
    summary_op = tf.summary.merge_all()

    print('Setting up read data set')
    train_records, valid_records = scene_parsing.read_dataset(DATA_DIR)
    print(len(train_records))
    print(len(valid_records))

    image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
    train_dataset_reader = dataset.BatchDataset(train_records, image_options)
    valid_dataset_reader = dataset.BatchDataset(valid_records, image_options)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer = tf.summary.FileWriter('./log_dir', sess.graph)

        for itr in range(MAX_ITERATION):
            train_images, train_annotations = train_dataset_reader.next_batch(
                BATCH_SIZE)
            feed_dict = {
                image: train_images,
                annotation: train_annotations,
                keep_probability: 0.85
            }

            sess.run(train_op, feed_dict=feed_dict)

            if itr % 10 == 0:
                train_loss, summary_str = sess.run([loss, summary_op],
                                                   feed_dict=feed_dict)
                print('Step: %d, Train_loss: %g' % (itr, train_loss))
                # The global_step argument of add_summary is optional.
                summary_writer.add_summary(summary_str, itr)

            if itr % 100 == 0:
                gpre_annotation = sess.run(pre_annotation, feed_dict=feed_dict)
                utils.save_image_pre_annotation(gpre_annotation, train_images,
                                                train_annotations)

            if itr % 500 == 0:
                valid_images, valid_annotations = valid_dataset_reader.get_random_batch(
                    BATCH_SIZE)
                valid_loss = sess.run(loss,
                                      feed_dict={
                                          image: valid_images,
                                          annotation: valid_annotations,
                                          keep_probability: 1.0
                                      })
                print("%s ---> Valid loss: %g" %
                      (datetime.datetime.now(), valid_loss))
                saver.save(sess, './models', global_step=itr)
Example #10
def prepare_experiment(dataset):
    return read_dataset(dataset['format'], dataset['name'], dataset['labels'])
Example #11
    ml = True
    activation = "sigmoid"
    loss = "binary_crossentropy"

print("|-------------Training info-------------")
print("|-Dataset:   ", args.dataset)
print("|-Model:     ", args.model)
print("|-Epoch:     ", args.epoch)
print("|-Batch_size:", args.batch_size)
print("|-Activation:", activation)
print("|-Loss:      ", loss)
print("|---------------------------------------")

### Read data
print("> Loading training data")
x_train, y_train = rd.read_dataset(path + "/train/*")
print("> Loading testing data")
x_test, y_test = rd.read_dataset(path + "/test/*")
x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                  y_train,
                                                  test_size=0.2,
                                                  shuffle=True)

print("Train:", x_train.shape, y_train.shape)
print("Val:", x_val.shape, y_val.shape)
print("Test:", x_test.shape, y_test.shape)
print("Done!")

### Prepare model
img_shape = (224, 224, 3)
num_class = 5
Example #12
import numpy as np
np.random.seed(1337)
import read_data
import label
import mymodel
from keras.utils import np_utils, multi_gpu_model
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import confusion_matrix
from keras.models import load_model
from keras.optimizers import Adam, RMSprop, Adamax
import matplotlib.pyplot as plt

T = "all_bonafid_split_1s"
X_s = read_data.read_dataset(r'E:\GYK\google_tts\data\{}'.format(T))

# 14,663
F = "all_SS_1_split_1s"
X_c = read_data.read_dataset(r'E:\GYK\google_tts\data\{}'.format(F))

X_test = np.vstack((X_s, X_c))

m = X_s.shape[0]
n = X_c.shape[0]
y_test = [1] * m + [0] * n
# label.creat_label(m,n)
# y_test=read_data.read_label(r'.\label.txt')
Example #13
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

        W_fc2 = weight_variable([1024, 2])
        b_fc2 = bias_variable([2])

        y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2

        return y_conv, keep_prob





# Import data

    X, Y, X_test, Y_test = read_data.read_dataset()

    l_data = len(X)
    print len(X), len(Y)
    print X[0].shape, Y[0]
    for i in range(0, 20):
        print Y[i]
        cv2.imshow("person", X[i])
        cv2.waitKey(0)

    print X[0]
    print X[1].shape, Y[1]
    print X[2].shape, Y[2]
    print len(X_test), len(Y_test)
    print X_test[0].shape, Y_test[0]
    print X_test[1].shape, Y_test[1]
Example #14
import numpy as np
np.random.seed(1337)
import read_data
import mymodel
from keras.utils import np_utils, multi_gpu_model
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import confusion_matrix
from keras.models import load_model
from keras.optimizers import Adam, RMSprop, Adamax
import matplotlib.pyplot as plt

# load data
# 16,196
X_s = read_data.read_dataset(r'E:\GYK\google_tts\TIMIT_split_1s')

# 14,663
X_c = read_data.read_dataset(r'E:\GYK\google_tts\TIMIT_wavnet_split_low2')
X = np.vstack((X_s, X_c))
y = read_data.read_label(r'.\label.txt')

# data preprocess
X_train, X_test, y_train, y_test_1 = train_test_split(X, y, test_size=0.2, random_state=0)

X_train = X_train.reshape(-1, X.shape[1], 1)
X_test = X_test.reshape(-1, X.shape[1], 1)
y_train = np_utils.to_categorical(y_train, num_classes=2)
y_test = np_utils.to_categorical(y_test_1, num_classes=2)
print('...', y_test_1)
print('...', X_train.shape)
Example #15
from model import *
from keras.optimizers import Adam, SGD, Adadelta
from keras.callbacks import ReduceLROnPlateau, LearningRateScheduler
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix, precision_score, recall_score
from sklearn.model_selection import train_test_split
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
import numpy as np
""" Limit memory """
# Auto
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
tf.keras.backend.set_session(sess)

x_train, y_train = rd.read_dataset("./dataset/train_b/*", 1)
x_test, y_test = rd.read_dataset("./dataset/test/*", 1)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, shuffle= True)

input_shape = (224, 224, 3)
num_classes = 4
finalAct = 'sigmoid'

batch_size = 16
epochs = 40

model = densenet(input_shape, num_classes, finalAct)
opt = SGD(lr=0.01)  # , decay=0.0001, momentum=0.9, nesterov=True
#opt = Adam(lr=0.01)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=["accuracy"])
Example #16
import numpy as np
from matplotlib import pyplot
#%matplotlib inline
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

"""
Multi-task learning based on densenet
"""

# Read training and testing data
#
# def read_dataset(loc, augment mode):
#     return img, label
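# The helper below is only a minimal sketch of what a loader with that contract
# might look like; the real rd.read_dataset used in this snippet is not shown here,
# and the assumed file layout (a .npy label vector stored next to each image) is
# purely for illustration.
import glob

import cv2
import numpy as np


def read_dataset_sketch(loc, augment_mode=0):
    """Load (image, label) pairs from every image file matching the glob `loc`."""
    imgs, labels = [], []
    for img_path in sorted(glob.glob(loc)):
        if not img_path.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue
        img = cv2.imread(img_path)
        img = cv2.resize(img, (224, 224))
        label = np.load(img_path.rsplit('.', 1)[0] + '.npy')
        imgs.append(img)
        labels.append(label)
        if augment_mode == 1:
            # simplest possible augmentation: also keep a horizontally flipped copy
            imgs.append(cv2.flip(img, 1))
            labels.append(label)
    return np.array(imgs), np.array(labels)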

x_train, y_train = rd.read_dataset("./dataset/new/train/*", 1)
x_test, y_test = rd.read_dataset("./dataset/new/test_un/*", 1)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, shuffle=True)
print("\n")
print("Train:", x_train.shape, y_train.shape)
print("Val:", x_val.shape, y_val.shape)
print("Test:", x_test.shape, y_test.shape)
print("Done!")

# Prepare Model
# In model.py
# densenet, densenet_multi, cnn, alexnet, vgg

img_shape = (224, 224, 3)
num_class = 4
final_Act = 'softmax'