def __init__(self):
    """Initialize every mixin base class explicitly, then run the
    requested subcommand.

    The bases are initialized one by one (not via a super() chain), so
    the order written here is the actual initialization order.
    NOTE(review): assumes one of the base initializers (presumably
    Interface.__init__) parses CLI arguments into self.args — confirm.
    """
    Interface.__init__(self)
    Loader.__init__(self)
    Resizer.__init__(self)
    Processor.__init__(self)
    Colorizer.__init__(self)
    Displayer.__init__(self)
    # Dispatch: the parsed command name must match a method on self;
    # an unknown command raises AttributeError here.
    getattr(self, self.args.command)()
svmgamma = [0.25] svmC = [0.5] graphcut_lambda = [1] params = list( itertools.product(ncolors, npca, svmgamma, svmC, graphcut_lambda, input_files)) p = params[0] #which parameter range this node should use #chunk_size = int(len(input_files)/comm.size) #start = comm.rank * chunk_size #stop = start + chunk_size c = Colorizer(ncolors=p[0], npca=p[1], svmgamma=p[2], svmC=p[3], graphcut_lambda=p[4]) #train the classifiers c.train(training_files) try: print(input_files[comm.rank]) #for now, convert an already RGB image to grayscale for our input grayscale_image = get_grayscale_from_color(input_files[comm.rank]) #colorize the input image colorized_image, g = c.colorize(grayscale_image, skip=2)
params = list(itertools.product(ncolors, npca, svmgamma, svmC, graphcut_lambda, ntrain)) #which parameter range this node should use chunk_size = int(len(params)/comm.size) start = comm.rank * chunk_size stop = start + chunk_size #print('image, ncolors, npca, svmgamma, svmC, graphcut_lambda, ntrain, avgError') for (ind, p) in enumerate(params[start:stop]): try: # print('image: %d\t ncolors=%f, npca=%f, svmgamma=%f, svmC=%f, graphcut_lambda=%f, ntrain=%f'%(comm.rank*chunk_size+ind, p[0], p[1], p[2], p[3], p[4], p[5])) c = Colorizer(ncolors=p[0], npca=p[1], svmgamma=p[2], svmC=p[3], graphcut_lambda=p[4], ntrain = p[5]) # c = Colorizer(ncolors=4, npca=32, svmgamma=0.1, svmC=0.25, graphcut_lambda=0.5, ntrain = 1000) #train the classifiers c.train(training_files) #for now, convert an already RGB image to grayscale for our input grayscale_image = get_grayscale_from_color(input_file) #colorize the input image colorized_image, g = c.colorize(grayscale_image,skip=1) colorized_image = c.smooth(colorized_image) #save the outputs #cv2.imwrite('output_gray.jpg', grayscale_image) #cv2.imwrite('output_color.jpg', cv2.cvtColor(colorized_image, cv.CV_RGB2BGR))
#def __init__(self, ncolors=16, probability=False, npca=30, svmgamma=0.1, svmC=1, graphcut_lambda=1): ncolors = [8] npca = [64] svmgamma = [0.1] svmC = [1] graphcut_lambda = [1.1] params = list(itertools.product(ncolors, npca, svmgamma, svmC, graphcut_lambda, input_files)) p = params[0] #which parameter range this node should use chunk_size = int(len(input_files)/comm.size) start = comm.rank * chunk_size stop = start + chunk_size c = Colorizer(ncolors=p[0], npca=p[1], svmgamma=p[2], svmC=p[3], graphcut_lambda=p[4]) #train the classifiers c.train(training_files) try: for file in input_files[start:stop]: print('Processing: '+os.path.basename(file)) #for now, convert an already RGB image to grayscale for our input grayscale_image = get_grayscale_from_color(file) #colorize the input image colorized_image, g = c.colorize(grayscale_image,skip=2) l,a,b = cv2.split(cv2.cvtColor(colorized_image, cv.CV_BGR2Lab))
#training_files = ['images/book_chapter/islande.jpg' ] #input_file = 'images/book_chapter/paysage_gris.png' #training_files = ['test/jp.jpg' ] #input_file = 'test/chris.jpg' #training_files = ['images/houses/calhouse_0001.jpg' ] #input_file = 'images/houses/calhouse_0002.jpg' #training_files = ['test/ch1.jpg'] #input_file = 'test/ch1.jpg' #training_files = ['images/cats/cat.jpg','images/cats/cats4.jpg'] #input_file = 'images/cats/cats3.jpg' c = Colorizer(probability=False) x = sc.parallelize([1]) #train the classifiers x = x.map(lambda y: c.train(training_files)) y = x.collect() c = y[0] #for now, convert an already RGB image to grayscale for our input grayscale_image = get_grayscale_from_color(input_file) #colorize the input image colorized_image, g = c.colorize(grayscale_image, skip=8) print('min g = %f, max g = %f' % (np.min(g), np.max(g))) #save the outputs
import pickle

from colorizer import Colorizer


def get_grayscale_from_color(color_file):
    '''
    Takes the path to a RGB image file and returns a numpy array of its
    luminance (the L channel of the image converted to CIE Lab).
    '''
    L, _, _ = cv2.split(cv2.cvtColor(cv2.imread(color_file), cv.CV_BGR2Lab))
    return L


if __name__ == '__main__':
    training_files = ['images/houses/calhouse_0001.jpg']
    input_file = 'images/houses/calhouse_0007.jpg'

    c = Colorizer(probability=False)
    # Grayscale version of the input; kept even though unused in the
    # visible sweep below — later code may rely on it.
    img = get_grayscale_from_color(input_file)

    # Load saved colorization data (pre-graphcut).  Using a context
    # manager guarantees the file is closed even if pickle.load raises.
    # NOTE: pickle.load can execute arbitrary code from a malicious
    # file; acceptable only because dump.dat is produced locally.
    with open('dump.dat', 'rb') as f:
        d = pickle.load(f)

    # Inject the saved state into the colorizer so graphcut can be
    # re-run with different weights without retraining the classifiers.
    c.colors_present = d['colors']
    c.g = d['g']
    c.label_to_color_map = d['cmap']

    # Sweep the graphcut weight l to compare the resulting labelings.
    for l in range(0, 10, 1):
        print('l=%d' % l)
        output_labels = c.graphcut(d['S'], l=l)
ntrain)) #which parameter range this node should use chunk_size = int(len(params) / comm.size) start = comm.rank * chunk_size stop = start + chunk_size #print('image, ncolors, npca, svmgamma, svmC, graphcut_lambda, ntrain, avgError') for (ind, p) in enumerate(params[start:stop]): try: # print('image: %d\t ncolors=%f, npca=%f, svmgamma=%f, svmC=%f, graphcut_lambda=%f, ntrain=%f'%(comm.rank*chunk_size+ind, p[0], p[1], p[2], p[3], p[4], p[5])) c = Colorizer(ncolors=p[0], npca=p[1], svmgamma=p[2], svmC=p[3], graphcut_lambda=p[4], ntrain=p[5]) # c = Colorizer(ncolors=4, npca=32, svmgamma=0.1, svmC=0.25, graphcut_lambda=0.5, ntrain = 1000) #train the classifiers c.train(training_files) #for now, convert an already RGB image to grayscale for our input grayscale_image = get_grayscale_from_color(input_file) #colorize the input image colorized_image, g = c.colorize(grayscale_image, skip=1) colorized_image = c.smooth(colorized_image) #save the outputs #cv2.imwrite('output_gray.jpg', grayscale_image)
real 0m47.679s user 2m11.769s """ orignal_image_folder = 'datasets/preprocessed_test' filenames = [ os.path.join(orignal_image_folder, record) for record in os.listdir(orignal_image_folder) if os.path.isfile(os.path.join(orignal_image_folder, record)) ] n_epochs = 1 batch_size = 4 iterator = DatasetIterator(filenames, n_epochs, batch_size, shuffle=True) colorizer = Colorizer(iterator) new_image_node, example_node = colorizer.showcase() saver = tf.train.Saver() with tf.Session() as sess: saver.restore(sess, 'models/my-model') sess.run(iterator.initializer()) new_ab_list, example = sess.run([new_image_node, example_node]) new_ab_list = colorizer.bin_prob_to_ab(new_ab_list) l_channel_list = example['l_channel'].values.reshape( (batch_size, 128, 128, 1)) labels_list = example['labels'].values.reshape(
if __name__=="__main__": training_images = ["images/grass1.jpg","images/grass2.jpg"] test_image = skio.imread("images/grass3.jpg") #getting the right featurizer f = Featurizer(training_images) f.compute_k_means() print "Getting features..." f.compute_features() gray_test = get_grayscale(test_image) #getting the right colorizer colorizer = Colorizer(f) print "Starting Training of SVMs..." colorizer.train() #running the experiment print "Colorizing Image..." colored_image = colorizer.color_image(gray_test) print "Grayscale Image" skio.imshow(gray_test) skio.show() print "Colorized Image" skio.imshow(colored_image) skio.show() skio.imsave("results/" + test_image.split("/")[1],colored_image)
# Candidate graphcut smoothing weights for the parameter sweep.
graphcut_lambda = [0, 1, 2, 5, 10, 100]
# Full cartesian product of all hyperparameter candidates.
# (ncolors, npca, svmgamma, svmC are defined earlier, outside this view.)
params = list(itertools.product(ncolors, npca, svmgamma, svmC, graphcut_lambda))
#which parameter range this node should use
# Static partition of the parameter grid across MPI ranks.
chunk_size = int(len(params)/comm.size)
start = comm.rank * chunk_size
stop = start + chunk_size
# NOTE(review): start/stop are computed but never used in the visible
# code below — the Colorizer is built with defaults, not params[start:stop].
# Confirm whether the sweep was intentionally disabled.
#training_files = ['test/ch1.jpg']
#input_file = 'test/ch1.jpg'
#training_files = ['images/cats/cat.jpg','images/cats/cats4.jpg']
#input_file = 'images/cats/cats3.jpg'
c = Colorizer(probability=False)
#train the classifiers
c.train(training_files)
#for now, convert an already RGB image to grayscale for our input
grayscale_image = get_grayscale_from_color(input_file)
#colorize the input image
# skip=8 subsamples pixels during inference to cut runtime.
colorized_image, g = c.colorize(grayscale_image,skip=8)
print('min g = %f, max g = %f'%(np.min(g), np.max(g)))
#save the outputs
cv2.imwrite('output_gray.jpg', grayscale_image)
# Colorizer output is RGB; OpenCV expects BGR on write, hence the convert.
cv2.imwrite('output_color.jpg', cv2.cvtColor(colorized_image, cv.CV_RGB2BGR))
] n_tot = 1000 n_epochs = 20 batch_size = 50 assert not n_tot % batch_size val_batch_size = 50 learning_rate = 0.0001 iterator = DatasetIterator(filenames, batch_size=batch_size, shuffle=True) val_iterator = DatasetIterator(val_filenames, batch_size=val_batch_size, shuffle=False) colorizer = Colorizer(iterator, learning_rate) optimizer, loss_node = colorizer.training_op() colorizer.set_iterator(val_iterator) _, val_loss_node = colorizer.training_op() saver = tf.train.Saver() load = False save = True losses = [] losses_val = [] with tf.Session() as sess: if load: saver.restore(sess, 'models/my-model')
#training_files = ['images/book_chapter/islande.jpg' ] #input_file = 'images/book_chapter/paysage_gris.png' #training_files = ['test/jp.jpg' ] #input_file = 'test/chris.jpg' #training_files = ['images/houses/calhouse_0001.jpg' ] #input_file = 'images/houses/calhouse_0002.jpg' #training_files = ['test/ch1.jpg'] #input_file = 'test/ch1.jpg' #training_files = ['images/cats/cat.jpg','images/cats/cats4.jpg'] #input_file = 'images/cats/cats3.jpg' c = Colorizer(probability=False) x = sc.parallelize([1]) #train the classifiers x = x.map(lambda y: c.train(training_files)) y = x.collect() c = y[0] #for now, convert an already RGB image to grayscale for our input grayscale_image = get_grayscale_from_color(input_file) #colorize the input image colorized_image, g = c.colorize(grayscale_image,skip=8) print('min g = %f, max g = %f'%(np.min(g), np.max(g))) #save the outputs