Exemplo n.º 1
0
def create_noise(data):
    """Split *data* into thirds by index and apply a different noise type
    to each third.

    Images at index % 3 == 0 get black salt & pepper noise, index % 3 == 1
    get colour s&p noise, and index % 3 == 2 get gaussian noise.  Every
    image is converted from grayscale to BGR before the noise is applied.

    Returns the list of noisy images: all sp-noise images first, then the
    colour s&p ones, then the gaussian ones (same grouping as before).
    """
    noisy_data = []
    # BUG FIX: the original kept incrementing one counter across all three
    # loops, so the selections in the second and third passes depended on
    # len(data) % 3.  Each pass now tests its own image index directly.
    for idx, img in enumerate(data):
        if idx % 3 == 0:
            bgr = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
            noisy_data.append(Noise.sp_noise(bgr, 0.05))  # Black Salt pepper
    for idx, img in enumerate(data):
        if idx % 3 == 1:
            bgr = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
            noisy_data.append(Noise.noisy("s&p", bgr))  # Color S&P
    for idx, img in enumerate(data):
        if idx % 3 == 2:
            bgr = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
            noisy_data.append(Noise.noisy("gauss", bgr))
    return noisy_data
Exemplo n.º 2
0
def main():
    """Generate augmented training images (blur, noise, morphological
    transforms) for every target musical symbol."""
    print("Start to preprocess training data.")
    target_symbol = [
        'musical_symbol_bass_clef', 'musical_symbol_half_note',
        'musical_symbol_quarter_note', 'musical_symbol_quarter_rest',
        'musical_symbol_g_clef'
    ]
    # Source folders holding the clean sample renders.
    sample_path = [
        "C:\\Users\\san34\\Desktop\\2018_summer\\project\\music_simulation_software\\Scores\\Output\\0213_Symbol\\",
        "C:\\Users\\san34\\Desktop\\2018_summer\\project\\music_simulation_3D\\CSE5542-Lab1-Solution\\Output\\picture\\0331_OpenGL_RGB\\"
    ]

    start = 2500       # first output index
    simulation = 100   # number of images produced per augmentation kind

    for idx in range(len(target_symbol)):
        symbol = target_symbol[idx]
        output_path = "..//Output//0401_1//" + symbol + "//0401_"
        print("target_symbol: ", symbol)
        print("start: ", start, ", end: ", (start + simulation))
        # Run each augmentation pipeline over the same sample set.
        Blur.blur(symbol, sample_path, output_path, start, simulation)
        Noise.noise(symbol, sample_path, output_path, start, simulation)
        MorTransform.morTransform(symbol, sample_path, output_path, start,
                                  simulation)

    print("Finish preprocess training data.")
Exemplo n.º 3
0
def bitsfromimgfieldsnoise(im, key):
    """Extract and decrypt a message hidden in image *im* by LSB
    steganography.

    The 32-bit message size is read from the 2 LSBs of the red channel of
    the first 16 pixels of row 0.  The message bits themselves are then
    read from the key-seeded pseudo-random pixel sequence produced by
    Noise.randWithNoise (which must exclude those first 16 pixels, as they
    are reserved for the size field).

    Returns the decrypted message from AESdome.decryptmsg.
    """
    px = im.load()  # pixel access object for the image

    # Read the size field: 2 bits per pixel from the first 16 pixels.
    sizeArr = []
    sizeindex = 0
    for i in range(16):
        p = px[i, 0]
        decodelsb(p[0], sizeArr, sizeindex)  # decode 2 LSBs of red channel
        sizeindex += 2
    size = AESdome.bitsasint(sizeArr)

    # Re-derive the same pseudo-random pixel sequence the encoder used.
    seed = prngSteg.generateSeed(key)
    rl = Noise.randWithNoise(size, seed, im)

    # Collect the message bits, 2 per pseudo-random pixel.
    msgArr = []
    msgindex = 0
    for x in rl:
        i = x[0]
        j = x[1]
        p = px[i, j]  # (r, g, b) values for this pixel
        decodelsb(p[0], msgArr, msgindex)
        msgindex += 2

    # size // 8 is the message length in bytes.
    return AESdome.decryptmsg(msgArr, key, size // 8)
Exemplo n.º 4
0
def fill_with_rand_shape(dm, dim, n_iterations=10, blur=True):
    """Drop *n_iterations* random shapes onto *dm*; optionally blur the
    result before returning it."""
    for _ in range(n_iterations):
        add_random_shape(dm)
    if not blur:
        return dm
    import Noise
    return Noise.blur_filter(dm, dim)
Exemplo n.º 5
0
	def simplex(self,octaves=4,freq=0.00388,amp=95.0):
		"""Build a heightmap by summing several octaves of 3-D simplex
		noise into self.array (a width x width grid)."""
		self.octaves = octaves
		self.freq = freq
		self.amp = amp

		# Simplex noise generator.
		noise_gen = Noise()

		# Pre-compute (frequency, amplitude) for every octave once.
		layers = [(freq * (t + 1.0) ** 2, amp / ((t + 1.0) ** 3.4)) for t in range(octaves)]

		# Accumulate every octave's contribution into each grid cell.
		for i in range(self.width):
			for j in range(self.width):
				for f, a in layers:
					self.array[i][j] += noise_gen.simplex3d(i * f, j * f, 5.0) * a + 30.0
Exemplo n.º 6
0
 def __init__(self, a):
     """Store the perturbation amount *a* and set up the noise source."""
     super(Perturb, self).__init__()
     self.a = a
     # Scale factor applied to the perturbation.
     self.size = 0.1
     # NOTE(review): seeding strategy is still open — the noise source must
     # be deterministic for a given object or animations will fail badly.
     self.noise = Noise.Noise()
Exemplo n.º 7
0
 def finalLayer(self, y, n_iters=1, learner_size=200):
     # Train the network's final classification stage: a sigmoid hidden
     # layer (with gaussian input noise) feeding a softmax output layer,
     # then append both trained layers to the model.
     #
     # y            -- target labels, shape (n_samples, n_classes)
     # n_iters      -- number of training iterations
     # learner_size -- width of the hidden sigmoid layer
     print "Final Layer"
     sigmoid = Layers.SigmoidLayer(self.X.shape[1],
                                   learner_size,
                                   noise=Noise.GaussianNoise(0.1))
     softmax = Layers.SoftmaxLayer(learner_size, y.shape[1])
     trainer = Trainer()
     # Jointly train both layers on the features accumulated in self.X.
     sigmoid, softmax = trainer.train([sigmoid, softmax], self.X, y,
                                      n_iters)
     self.Layers.append(sigmoid)
     self.Layers.append(softmax)
Exemplo n.º 8
0
def modimagefieldsnoise(im, msg, key):
    """Hide *msg* (bytes) inside image *im* using LSB steganography.

    The 32-bit message size is written into the 2 LSBs of the red channel
    of the first 16 pixels of row 0, and the message bits are written into
    a key-seeded pseudo-random pixel sequence (2 bits per pixel).  The
    modified image is saved as 'tmp1.png'.

    Returns an error string when the message cannot fit; otherwise None.
    """
    msgfield = 32  # size-field width in bits (16 pixels x 2 bits each)

    # Convert the message bytes into a flat bit list, e.g. [0, 0, 1, ...].
    msgArr = AESdome.bitfield(int.from_bytes(msg, byteorder='big'))
    size = len(msgArr)
    if (size > 2**msgfield):
        return 'Message is too large to store in image'

    sizeArr = AESdome.intasbits(size, msgfield)  # size as an array of bits

    # The same key always yields the same pixel sequence, so the decoder
    # can re-derive where the message bits live.
    seed = prngSteg.generateSeed(key)
    rl = Noise.randWithNoise(size, seed, im)

    px = im.load()  # pixel access object for the image

    # Store the size field in the first 16 pixels of row 0.
    sizeindex = 0
    for i in range(16):
        p = px[i, 0]
        r = encodelsb(p[0], sizeArr, sizeindex)
        px[i, 0] = (r, p[1], p[2])  # only the red channel is modified
        sizeindex += 2

    # Store the message, 2 bits per pseudo-random pixel.
    msgindex = 0
    for x in rl:
        i = x[0]
        j = x[1]
        p = px[i, j]  # get the r,g,b values for this pixel
        r = encodelsb(p[0], msgArr, msgindex)
        msgindex += 2
        px[i, j] = (r, p[1], p[2])

    im.save('tmp1.png')  # persist the modified image
 def pre_train(self, X, epochs=1, noise_rate=0.3):
     # Greedy layer-wise pre-training of the stacked network.
     #
     # X          -- training data, shape (n_samples, n_features)
     # epochs     -- training epochs per layer pair
     # noise_rate -- salt-and-pepper corruption rate for each encoder input
     #
     # Each pass trains a denoising autoencoder pair (encoder s1, decoder
     # s2) against the CURRENT representation self.X, then replaces self.X
     # with the encoded activations for the next pass.  Only the encoder
     # half (s1) is kept in self.Layers.
     self.structure = numpy.concatenate([[X.shape[1]], self.structure])
     self.X = X
     trainer = Trainer()
     print("Pre-training: ")  #, self.__repr__()
     for i in range(len(self.structure) - 1):
         #print ("Layer: %dx%d"%( self.structure[i], self.structure[i+1]))
         s1 = Layers.SigmoidLayer(self.structure[i],
                                  self.structure[i + 1],
                                  noise=Noise.SaltAndPepper(noise_rate))
         # Decoder maps back to the current feature width (self.X.shape[1]).
         s2 = Layers.SigmoidLayer(self.structure[i + 1], self.X.shape[1])
         s1, s2 = trainer.train([s1, s2], self.X, self.X, epochs)
         self.X = s1.activate(self.X)
         self.Layers.append(s1)
Exemplo n.º 10
0
def serial_decrypt(code, decy_code):
    """Undo the serial encryption described by *decy_code*.

    The top byte of bin(decy_code) is read as 8 binary digits: digits 4-7
    give the offset (3 + their value) and digits 0-3, reversed, select
    which of four decryption rounds run.  Even-positioned rounds undo the
    swap transform, odd-positioned rounds undo the Caesar shift.
    """
    command_bits = bin(decy_code)[2:10]
    offset = 3 + int(command_bits[4:], 2)
    rounds = command_bits[3::-1]  # first four digits, reversed

    for position in range(4):
        if rounds[position] != '1':
            continue
        if position % 2 == 0:
            code = Noise.de_swt(code, offset)
        else:
            code = caesar.dec(code, offset)

    return code
Exemplo n.º 11
0
import Noise as noise
import pickle

#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#DATA SET WITHOUT NOISE
#1. Generate data set without noise
#2. Set aside 2/3 for validation
#3. Train algorithms on test data
#3b. Plot test error versus iteration
#4. Compare classifiers performance on validation set
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

# Dimensionality and size of the synthetic data set.
num_dim = 15
num_data = 1000

# Clean (noise-free) labelled points.  pt is presumably the reference
# point used by the labelling rule — it is reused below so every noisy
# variant shares it (TODO confirm against Noise.label_points).
artificial_data, artificial_labels, pt = noise.label_points(num_dim, num_data)

# Gaussian-noise variants of the same data at 5% and 10% noise levels.
gaussian_data_05, gaussian_labels_05 = noise.generate_noise(
    artificial_data,
    artificial_labels,
    noise_type='gaussian',
    prop=0.05,
    point=pt)
gaussian_data_1, gaussian_labels_1 = noise.generate_noise(
    artificial_data,
    artificial_labels,
    noise_type='gaussian',
    prop=0.1,
    point=pt)
gaussian_data_2, gaussian_labels_2 = noise.generate_noise(
    artificial_data,
Exemplo n.º 12
0
    def learning_iteration(self):
        """Run one Caffe training iteration.

        Pulls a batch of rendered images and poses from the queues, builds
        the grayscale / census-transform / edge input channels, fills the
        solver's data and label blobs, and steps the solver once.  Loss
        curves are dumped to .npy files every 50 iterations.  Returns None
        (and does nothing) when the image queue holds too few items.
        """
        # print 'DoCaffeTrainng'
        startTime = time.time()
        # print 'q_imStack.qsize() : ', self.q_imStack.qsize()
        # print 'q_labelStack.qsize() : ', self.q_labelStack.qsize()

        # if too few items in queue do not proceed with iterations
        if self.q_imStack.qsize() < 16 * 5:
            return None

        batchsize = self.solver.net.blobs['data'].data.shape[0]
        # print 'batchsize', batchsize
        # print "self.solver.net.blobs['data'].data", self.solver.net.blobs['data'].data.shape
        # print "self.solver.net.blobs['label_x'].data",self.solver.net.blobs['label_x'].data.shape
        for i in range(batchsize):
            im = self.q_imStack.get()  #320x240x3
            y = self.q_labelStack.get()

            # Corrupt with gaussian noise, then collapse to grayscale.
            im_noisy = Noise.noisy('gauss', im)
            im_gry = np.mean(im_noisy, axis=2)

            # cv2.imwrite( str(i)+'__.png', x )

            # Census transform and Canny edge map as extra input channels.
            cencusTR = ct.censusTransform(im_gry.astype('uint8'))
            edges_out = cv2.Canny(cv2.blur(im_gry.astype('uint8'), (3, 3)),
                                  100, 200)

            self.solver.net.blobs['data'].data[i, 0, :, :] = self.zNormalized(
                im_gry.astype('float32'))
            self.solver.net.blobs['data'].data[i, 1, :, :] = self.zNormalized(
                cencusTR.astype('float32'))
            # NOTE(review): channel 1 is written twice — this next write
            # overwrites the census transform stored just above; it likely
            # should target channel 2. Confirm the 'data' blob channel count.
            self.solver.net.blobs['data'].data[i, 1, :, :] = self.zNormalized(
                edges_out.astype('float32'))
            self.solver.net.blobs['label_x'].data[i, 0] = y[0]
            self.solver.net.blobs['label_y'].data[i, 0] = y[1]
            self.solver.net.blobs['label_z'].data[i, 0] = y[2]
            self.solver.net.blobs['label_yaw'].data[i, 0] = y[3]
            # print y[0], y[1], y[2], y[3]

        self.solver.step(1)
        # Record this iteration's losses for the periodic .npy dumps below.
        self.caffeTrainingLossX[
            self.caffeIter] = self.solver.net.blobs['loss_x'].data
        self.caffeTrainingLossY[
            self.caffeIter] = self.solver.net.blobs['loss_y'].data
        self.caffeTrainingLossZ[
            self.caffeIter] = self.solver.net.blobs['loss_z'].data
        self.caffeTrainingLossYaw[
            self.caffeIter] = self.solver.net.blobs['loss_yaw'].data
        if self.caffeIter % 50 == 0 and self.caffeIter > 0:
            print 'Writing File : train_loss.npy'
            np.save('train_loss_x.npy',
                    self.caffeTrainingLossX[0:self.caffeIter])
            np.save('train_loss_y.npy',
                    self.caffeTrainingLossY[0:self.caffeIter])
            np.save('train_loss_z.npy',
                    self.caffeTrainingLossZ[0:self.caffeIter])
            np.save('train_loss_yaw.npy',
                    self.caffeTrainingLossYaw[0:self.caffeIter])

        #time.sleep(.3)
        print 'my_iter=%d, solver_iter=%d, time=%f, loss_x=%f, lossYaw=%f' % (
            self.caffeIter, self.solver.iter, time.time() - startTime,
            self.caffeTrainingLossX[self.caffeIter],
            self.caffeTrainingLossYaw[self.caffeIter])
        self.caffeIter = self.caffeIter + 1
Exemplo n.º 13
0
            if done:
                print(i, "---", ep_reward)
                break

        if i % 20 == 0:
            pass
            #video_saver.release()


if __name__ == '__main__':

    batch_size = 64
    tf_config = tf.ConfigProto()
    #tf_config.gpu_options.per_process_gpu_memory_fraction = 0.6
    # Grow GPU memory on demand instead of reserving it all up front.
    tf_config.gpu_options.allow_growth = True

    env = gym.make('InvertedPendulum-v2')

    #print(env.action_space.high)

    with tf.Session(config=tf_config) as sess:

        # Build the actor-critic graph: 4-d state, 1-d action.
        actor = Actor.Actor(sess, [4], 1, 0.0001, 0.001, batch_size)
        critic = Critic.Critic(sess, [4], 1, 0.001, 0.001, 0.99,
                               actor.get_num_trainable_vars())
        actor_noise = Noise.GaussianNoise()

        # BUG FIX: tf.train.Saver() must be constructed AFTER the graph
        # variables exist — creating it before actor/critic (as the code
        # previously did) raises "No variables to save".
        saver = tf.train.Saver()

        train_feature(sess, env, actor, critic, actor_noise, batch_size, saver)
Exemplo n.º 14
0
    critic_lr = 1e-3
    tau = 5e-3
    gamma = 0.99
    sigma = 0.2
    critic_reg_weight = 0.0
    noise_type = "ou"

    assert noise_type in ["ou","gaussian"]

    with tf.Session(config=tf_config) as sess:
                        #state_dim : 1d, action_spec : scalar
        actor = Actor.Actor(sess, state_dim, env.action_spec().shape[0], actor_lr, tau, batch_size)
        critic = Critic.Critic(sess, state_dim, env.action_spec().shape[0], critic_lr, tau, gamma, actor.get_num_trainable_vars(),critic_reg_weight)

        if noise_type == "gaussian":
            actor_noise = Noise.GaussianNoise(action_dim=env.action_spec().shape[0],sigma=sigma)
        elif noise_type == "ou":
            actor_noise = Noise.OrnsteinUhlenbeckActionNoise(mu=np.zeros([env.action_spec().shape[0]]), sigma=sigma)

        exp_detail = utils.experiment_detail_saver(
                            domain_name, task_name, step_size,
                            actor_lr, critic_lr, tau,
                            gamma, sigma, batch_size,
                            critic_reg_weight)

        print(exp_detail)
        utils.append_file_writer(video_dir, "experiment_detail.txt", "Critic origin type : "\
                                 +critic.critic_origin_type+"\n")
        utils.append_file_writer(video_dir, "experiment_detail.txt", "Noise type : " \
                                 + noise_type + "\n")
Exemplo n.º 15
0
def train(args, actor, critic, target_actor, target_critic, actor_optm,
          critic_optm, env):
    """DDPG training loop.

    Runs args.episodes episodes of at most args.maxsteps steps each,
    storing transitions in a replay buffer.  Once the buffer holds more
    than args.batchsize transitions, every step updates the critic against
    a one-step TD target, updates the actor with the deterministic policy
    gradient, and soft-updates the target networks with rate args.tau.
    Saves the best actor to 'best_model_pendulum.pkl' and writes reward /
    loss / step plots to PNG files.
    """
    MSE = nn.MSELoss()
    global_step = 0
    epsilon = 1  # exploration-noise scale, decayed by args.epsdecay per step
    dim_action = env.action_space.shape[0]
    memory = Replay.replayBuffer(args.buffersize)
    noise = Noise.OrnsteinUhlembeckActionNoise(mu=np.zeros(dim_action))

    plot_reward = []
    plot_policy = []
    plot_q = []
    plot_steps = []
    best_reward = -np.inf

    for episode in range(args.episodes):
        s = deepcopy(env.reset())
        ep_reward = 0
        step = 0
        for step in range(args.maxsteps):
            global_step += 1
            epsilon = max(0, epsilon - args.epsdecay)
            a = actor.select_action(torch.tensor(s, dtype=torch.float32))
            a += noise() * epsilon
            a = np.clip(a, -1, 1)
            s2, reward, done, _ = env.step(a)
            memory.add(s, a, reward, done, s2)
            if memory.num_exp > args.batchsize:
                s_batch, a_batch, r_batch, t_batch, s2_batch = memory.sample(
                    args.batchsize)
                s_batch = torch.tensor(s_batch, dtype=torch.float32)
                a_batch = torch.tensor(a_batch, dtype=torch.float32)
                r_batch = torch.tensor(r_batch, dtype=torch.float32)
                t_batch = np.array(t_batch).astype(np.float32)
                t_batch = torch.tensor(t_batch, dtype=torch.float32)
                s2_batch = torch.tensor(s2_batch, dtype=torch.float32)

                # Critic update: one-step TD target from the target nets.
                a2_batch = target_actor(s2_batch)
                target_q = target_critic(s2_batch, a2_batch)
                y = r_batch + args.gamma * (1.0 - t_batch) * target_q.detach()
                q = critic(s_batch, a_batch)
                q_loss = MSE(q, y)
                critic_optm.zero_grad()
                q_loss.backward()
                critic_optm.step()

                # Actor update (deterministic policy gradient).
                # BUG FIX: the loss must flow through the actor's OWN
                # actions; the original used the replayed a_batch, which
                # gives the actor zero gradient.
                actor_loss = -critic(s_batch, actor(s_batch)).mean()
                actor_optm.zero_grad()
                actor_loss.backward()
                actor_optm.step()

                # Soft-update the target networks towards the live nets.
                for target_param, param in zip(target_critic.parameters(),
                                               critic.parameters()):
                    target_param.data.copy_(target_param.data *
                                            (1 - args.tau) +
                                            args.tau * param.data)
                for target_param, param in zip(target_actor.parameters(),
                                               actor.parameters()):
                    target_param.data.copy_(target_param.data *
                                            (1 - args.tau) +
                                            args.tau * param.data)
            # BUG FIX: advance the state.  The original wrote `s2 = s`,
            # so the agent acted on the reset state for the whole episode.
            s = deepcopy(s2)
            ep_reward += reward
            if done:
                noise.reset()
                break
        try:
            plot_reward.append([ep_reward, episode + 1])
            plot_policy.append([actor_loss.data, episode + 1])
            plot_q.append([q_loss.data, episode])
            plot_steps.append([step + 1, episode + 1])
        except NameError:
            # No gradient step happened yet (buffer below batch size), so
            # actor_loss / q_loss are unbound — skip this episode's plots.
            continue
        if ep_reward > best_reward:
            torch.save(actor.state_dict(), 'best_model_pendulum.pkl')
            best_reward = ep_reward
        print(ep_reward)

    plt.figure()
    plt.plot(np.array(plot_reward)[:, 1], np.array(plot_reward)[:, 0])
    plt.title('reward vs episodes')
    plt.savefig('./reward.png')
    plt.figure()
    plt.plot(np.array(plot_policy)[:, 1], np.array(plot_policy)[:, 0])
    plt.title('actor loss vs episodes')
    plt.savefig('./actor.png')
    plt.figure()
    plt.plot(np.array(plot_q)[:, 1], np.array(plot_q)[:, 0])
    plt.savefig('./q.png')
    plt.figure()
    # BUG FIX: the original wrapped plot_steps in an extra list
    # (np.array([plot_steps])), which plotted the wrong axes.
    plt.plot(np.array(plot_steps)[:, 1], np.array(plot_steps)[:, 0])
    plt.savefig('./steps.png')
Exemplo n.º 16
0
env = gym.make(problem)

# Environment dimensions and action bounds.
num_states = env.observation_space.shape[0]
print(f"Size of State Space ->  {num_states}")
num_actions = env.action_space.shape[0]
print(f"Size of Action Space ->  {num_actions}")
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
print(f"Max Value of Action ->  {upper_bound}")
print(f"Min Value of Action ->  {lower_bound}")


# Exploration-noise parameters.
initial_noise_factor = 1.1
std_dev = 0.3
ou_noise = Noise.OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))

# Episode budget.
total_episodes = 350

# Per-episode step cap and total initial exploration steps.
max_episode_length = 1000
beginning_exploration_steps = 100000

# Soft-update coefficient for the target networks.
tau = 0.005

# Per-episode reward history.
ep_reward_list = []
# Running-average reward history over recent episodes.
avg_reward_list = []
Exemplo n.º 17
0
'''
DEBUG = True

# Definisco i parametri del sistema
STATE_DIM = 3
PARTICLES = 200
TIME = 50

sys_par = Parameters(STATE_DIM, PARTICLES, TIME)
'''
Definisco i parametri relativi al rumore
'''
SIGMA_U = 10
SIGMA_V = 1

noise = Noise(sys_par.state_dim, sys_par.state_dim, SIGMA_U, SIGMA_V)
'''
Preparo le locazioni di memoria per i risultati
'''
x = zeros((sys_par.state_dim, TIME))
y = zeros((sys_par.state_dim, TIME))
u = zeros((sys_par.state_dim, TIME))
v = zeros((sys_par.state_dim, TIME))
'''
Inizializzazione stato iniziale
'''
xh0 = 0
'''
Generazione de
'''
for index in range(0, sys_par.state_dim):
Exemplo n.º 18
0
    with tf.Session(config=tf_config) as sess:
        #state_dim : 1d, action_spec : scalar

        if actor_type == "basic":
            actor = Actor.Actor(sess, state_dim, action_dim, actor_lr, tau,
                                batch_size)
        elif actor_type == "rnn":
            actor = RNNActor.Actor(sess, state_dim, action_dim, actor_lr, tau,
                                   batch_size, num_of_action)

        critic = Critic.Critic(sess, state_dim, action_dim, critic_lr, tau,
                               gamma, actor.get_num_trainable_vars(),
                               critic_reg_weight)

        if noise_type == "gaussian":
            actor_noise = Noise.GaussianNoise(action_dim=action_dim,
                                              sigma=sigma)
        elif noise_type == "ou":
            actor_noise = Noise.OrnsteinUhlenbeckActionNoise(mu=np.zeros(
                [int(action_dim / num_of_action)]),
                                                             sigma=sigma)

        exp_detail = utils.experiment_detail_saver(domain_name, task_name,
                                                   step_size, actor_lr,
                                                   critic_lr, tau, gamma,
                                                   sigma, batch_size,
                                                   critic_reg_weight)

        print(exp_detail)
        utils.append_file_writer(video_dir, "experiment_detail.txt", "num of action : " \
                        + str(num_of_action) + "\n")
        print("num of action : " + str(num_of_action))
Exemplo n.º 19
0
    def run_filter(self):
        """Apply the filter/noise selected via the radio buttons to every
        loaded image.

        For each checked option the processed images are collected in
        self.filtered_images, a 150x150 thumbnail item is added to the
        filter list widget, the progress bar is advanced, and
        self.filter_type records which operation ran.
        """
        self.list_to_filter = self.images_list
        self.filtered_images.clear()
        self.list_widget_filter.clear()
        self.progress_bar_filter.setValue(0)
        self.completed = 0
        length = len(self.images_list)

        def process_all(transform):
            # Run *transform* over every image path, storing the result and
            # appending a thumbnail item to the list widget.
            for e in self.images_list:
                img = transform(cv2.imread(e))
                self.filtered_images.append(img)
                img = cv2.resize(img, (150, 150))
                # NOTE(review): Format_Grayscale8 is used even for images
                # read as 3-channel by cv2.imread (unchanged from the
                # original code) — confirm the intended QImage format.
                image = QtGui.QImage(img.data, img.shape[1], img.shape[0],
                                     img.shape[1],
                                     QtGui.QImage.Format_Grayscale8)
                icon = QtGui.QIcon()
                icon.addPixmap(QtGui.QPixmap.fromImage(image),
                               QtGui.QIcon.Normal, QtGui.QIcon.Off)
                item = QListWidgetItem(self.getShortFilePath(e))
                item.setIcon(icon)

                self.list_widget_filter.addItem(item)
                self.completed += int(100 / length)
                self.progress_bar_filter.setValue(self.completed)
            self.progress_bar_filter.setValue(100)

        # One (radio button, filter label, transform) entry per option —
        # replaces six copy-pasted branches with identical plumbing.
        options = [
            (self.radio_button_median, "Median",
             lambda img: Filters.Median_filter(img, 3)),
            (self.radio_button_gaussian, "Gaussian",
             lambda img: Filters.GaussianBlurImage(img, 3)),
            (self.radio_button_bilateral, "Bilateral",
             lambda img: Filters.Bilateral_filter(img, 30, 30)),
            (self.radio_speckle, "SpeckleNoise", Noise.noise_Speckle),
            (self.radio_noisegauss, "GaussianNoise", Noise.noise_Gaussian),
            (self.radio_saltpeper, "SaltPeperNoise", Noise.noise_SaltPepper),
        ]
        for button, label, transform in options:
            if button.isChecked():
                self.filter_type = label
                process_all(transform)
Exemplo n.º 20
0
    def learning_iteration(self):
        """Run one TensorFlow training iteration.

        Pulls a batch of rendered images and poses from the queues,
        z-normalizes the RGB channels into the batch tensors, runs one
        optimizer step, and periodically writes TensorBoard summaries and
        model snapshots.  Returns None (and does nothing) when the image
        queue holds too few items.
        """
        # print 'DoCaffeTrainng'
        startTime = time.time()
        # print 'q_imStack.qsize() : ', self.q_imStack.qsize()
        # print 'q_labelStack.qsize() : ', self.q_labelStack.qsize()

        # if too few items in queue do not proceed with iterations
        if self.q_imStack.qsize() < 16 * 5:
            return None

        batchsize = 12
        im_batch = np.zeros((batchsize, 240, 320, 3))
        label_batch = np.zeros((batchsize, 4))
        # print 'batchsize', batchsize
        # print "self.solver.net.blobs['data'].data", self.solver.net.blobs['data'].data.shape
        # print "self.solver.net.blobs['label_x'].data",self.solver.net.blobs['label_x'].data.shape
        for i in range(batchsize):
            im = self.q_imStack.get()  #240x320x3 RGB
            y = self.q_labelStack.get()

            # NOTE(review): the noisy / grayscale versions are computed but
            # never used below — only the raw RGB channels of `im` go into
            # im_batch (census/edge channels are commented out).
            im_noisy = Noise.noisy('gauss', im)
            im_gry = np.mean(im_noisy, axis=2)

            # print im.shape
            # itr_indx = TrainRenderer.renderIndx
            # cv2.imwrite( 'dump/'+str(itr_indx)+'_'+str(i)+'.png', cv2.cvtColor( im.astype('uint8'), cv2.COLOR_BGR2RGB ) )

            #TODO remember to z-normalize
            im_batch[i, :, :, 0] = self.zNormalized(im[:, :, 0])
            im_batch[i, :, :, 1] = self.zNormalized(im[:, :, 1])
            im_batch[i, :, :, 2] = self.zNormalized(im[:, :, 2])
            label_batch[i, 0] = y[0]
            label_batch[i, 1] = y[1]
            label_batch[i, 2] = y[2]
            label_batch[i, 3] = y[3]

            # cencusTR = ct.censusTransform( im_gry.astype('uint8') )
            # edges_out = cv2.Canny(cv2.blur(im_gry.astype('uint8'),(3,3)),100,200)

        lr = self.get_learning_rate(self.tensorflow_iteration)

        # One optimizer step; also fetch the scalar cost and the summaries.
        _,aa,ss = self.tensorflow_session.run( [self.tensorflow_apply_grad,self.tensorflow_cost,self.tensorflow_summary_op], \
                        feed_dict={self.tf_x:im_batch,\
                        self.tf_label_x:label_batch[:,0:1], \
                        self.tf_label_y:label_batch[:,1:2], \
                        self.tf_label_z:label_batch[:,2:3], \
                        self.tf_label_yaw:label_batch[:,3:4], \
                        self.tf_learning_rate:lr} )

        print '[%4d] : cost=%0.4f ; time=%0.4f ms' % (
            self.tensorflow_iteration, aa, (time.time() - startTime) * 1000.)

        # Write Summary for TensorBoard
        if self.tensorflow_iteration % self.PARAM_WRITE_SUMMARY_EVERY == 0 and self.tensorflow_iteration > 0:
            print 'write_summary()'
            self.tensorflow_summary_writer.add_summary(
                ss, self.tensorflow_iteration)

        # Snapshot model
        if self.tensorflow_iteration % self.PARAM_WRITE_TF_MODEL_EVERY == 0 and self.tensorflow_iteration > 0:
            sess = self.tensorflow_session
            pth = self.PARAM_MODEL_SAVE_PREFIX
            step = self.tensorflow_iteration
            save_path = self.tensorflow_saver.save(sess, pth, global_step=step)
            print 'snapshot model()', save_path

        # Try testing every 100 iterations
        # if self.tensorflow_iteration % 100 == 0 and self.tensorflow_iteration > 0:
        # self.do_test_evaluation(100)

        self.tensorflow_iteration = self.tensorflow_iteration + 1
Exemplo n.º 21
0
    # Images are being flipped over X, Y, and the center automatically,
    # and each of them is being saved as a new data/image in the directory
    for j in range(-1, 2):
        flipImg = Flip.flipImage(img, j)
        cv2.imwrite(writePath + f"Flipped{j}{i}", flipImg)

    ###########################
    ############ 4 ############
    #  Hue image manipulation
    hueImg = Hue.hueImage(img, val=250)
    cv2.imwrite(writePath + f"Hue{i}", hueImg)

    ###########################
    ############ 5 ############
    # Noise image manipulation
    noisyImage = Noise.noisyImage(img, val=0.1)
    cv2.imwrite(writePath + f"Noisy{i}", noisyImage)

    ###########################
    ############ 6 ############
    # Black &  White image manipulation
    blackWhiteImage = BlackAndWhite.blackWhiteImage(img)
    cv2.imwrite(writePath + f"BlackAndWhite{i}", blackWhiteImage)

    ###########################
    ############ 7 ############
    # Shear image manipulation
    img = io.imread(inputPath + f"{i}")
    imgShear = Shear.shearImage(img)
    io.imsave(writePath + f"Shear{i}", imgShear)