Example No. 1
    def _get_cnn_output(self, inputs, model_name):
        """ Get conditional probabilities for input maps from the trained CNN model. """

        outputs_path = self.cnn_outputs_path
        # if CNN output is there, load it
        if os.path.isfile(outputs_path):
            return np.load(outputs_path)
        # otherwise get it from CNN model
        model_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\
                     + '/trained_models/' + model_name
        cnn_model = get_model(model_path)
        # add color channel axis for network input
        inputs = np.expand_dims(inputs, axis=1)
        outputs = cnn_model(inputs)
        np.save(outputs_path, outputs)
        return outputs
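The method above follows a simple load-or-recompute caching pattern: reuse a saved .npy result if it exists, otherwise run the model and persist the output. A minimal standalone sketch of the same idea, with a hypothetical compute function and cache path (not part of the original class):

import os
import numpy as np

def cached_compute(cache_path, compute_fn, *args):
    """Load a cached .npy result if it exists, otherwise compute and save it."""
    if os.path.isfile(cache_path):           # reuse the saved result
        return np.load(cache_path)
    result = np.asarray(compute_fn(*args))   # fall back to the expensive call
    np.save(cache_path, result)
    return result

# Hypothetical usage: square a small array and cache the answer in /tmp.
out = cached_compute('/tmp/squares.npy', lambda x: x ** 2, np.arange(4))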
Example No. 2
    def get_cnn_output(self):
        """
        Get the conditional probabilities from the trained model.
        """
        trained_models_path = self.config.trained_model_path
        model_path = trained_models_path + self.model_name
        save_path = '{}/{}_{}_output.npy'.format(model_path, self.width,
                                                 self.height)
        if os.path.isfile(save_path) and (not self.force_predict):
            probs = np.load(save_path)
        else:
            if self.cnn_model is None:
                self.cnn_model = get_model(model_path)
            # probs has shape (num_channels, width, height)
            probs = self.cnn_model(self.map[None, None, :, :])[0]
            np.save(save_path, probs)
        return probs
Example No. 3
def train_with_pars(conf_file, loss_fn, exp_folder, exp_name, **parameters):
    # With inplace=True, whatever is printed inside the loop below replaces
    # the contents of conf_file, overwriting matching 'name = value' lines.
    f = fileinput.input(files=conf_file, inplace=True)

    for line in f:
        write_str = line
        for par_name in parameters:
            assign_str = par_name + ' = '
            if line.startswith(assign_str):
                write_str = assign_str + str(parameters[par_name]) + '\n'
        print(write_str, end='')

    f.close()
    print('', end='\n')

    cf = imp.load_source('config', conf_file)
    cf.savepath = exp_folder + exp_name
    cf.config_path = conf_file

    initiate_training(cf)

    pred_fn = get_model(exp_folder + exp_name)
    return test(pred_fn, loss_fn, mode='val')
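A minimal sketch of how train_with_pars might be driven in a parameter sweep. The config path, experiment names, loss function, and the learning_rate parameter are hypothetical; the sketch assumes the config file contains a matching 'learning_rate = ...' line:

# Hypothetical sweep; my_loss_fn, paths and parameter names are placeholders.
for lr in [1e-3, 1e-4]:
    val_loss = train_with_pars('config.py', my_loss_fn,
                               'experiments/', 'lr_{}'.format(lr),
                               learning_rate=lr)
    print('learning_rate={} -> validation loss {}'.format(lr, val_loss))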
Example No. 4
    if not dataset:
        dataset = 'test'
    if not show_num:
        show_num = 10
    config_path_root = '/local/home/ful7rng/projects/transition/'
    if not config_file:
        config_path = config_path_root + 'config.py'
    else:
        config_path = config_path_root + config_file + '.py'

    cf = imp.load_source('config', config_path)
    trained_models_path = cf.project_folder + '/trained_models/'


    models_names_ = [trained_models_path+name for name in models_names]
    models = [get_model(name) for name in models_names_]
    train_iter, val_iter, test_iter = get_iters(config_path)
    if dataset == 'train':
        data_iter = train_iter
    elif dataset == 'val':
        data_iter = val_iter
    elif dataset == 'test':
        data_iter = test_iter

    # skip ahead to a randomly chosen batch
    for _ in range(np.random.randint(1, data_iter.get_n_batches())):
        data_iter.next()
    inputs, outputs, masks = data_iter.next()

    directions = cf.directions
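The if/elif chain above can equivalently be written as a dictionary lookup; a small sketch, reusing the three iterators already defined in the snippet:

# Equivalent dispatch via a dict instead of if/elif branches.
iters = {'train': train_iter, 'val': val_iter, 'test': test_iter}
data_iter = iters[dataset]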
Example No. 5
import os
import time

import cv2
import mxnet as mx
import numpy as np
import tensorflow as tf
from sklearn.preprocessing import normalize

# get_model and get_idx_list come from the surrounding project (not shown here).

print(tf.__version__)

imported = tf.saved_model.load('./tf_arcface_mobilefacenet/')
concrete_func = imported.signatures['serving_default']
print(concrete_func.structured_outputs)
print(concrete_func.inputs)
img = cv2.imread('./aligned_faces/000/000_0.bmp')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
input_data = np.asarray(img, np.float32)
output = concrete_func(tf.convert_to_tensor(np.expand_dims(input_data,
                                                           axis=0)))
print(output)

arcface_mx = get_model('./model/model-y1-test2', mx.cpu())

print('get list file')
aligned_faces_dir = '.\\aligned_faces'
idx_list, image_idx_list = get_idx_list(aligned_faces_dir)
print(idx_list[0], image_idx_list[0])
features = []
total_time = 0
total_time_arcface = 0
features_arcface = []
for image_idx in image_idx_list:
    idx, img_name = image_idx
    origin_img = cv2.imread(os.path.join(aligned_faces_dir, idx, img_name))
    t = time.time()
    img = cv2.cvtColor(origin_img, cv2.COLOR_BGR2RGB)
    f = concrete_func(
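The snippet is cut off above, but since it collects embeddings from both models and imports sklearn's normalize, a typical follow-up is cosine similarity between L2-normalized feature vectors. A minimal sketch under that assumption; the two feature arrays are placeholders for the model outputs:

import numpy as np
from sklearn.preprocessing import normalize

# Hypothetical embeddings; in the script these would come from the two models.
feat_a = np.random.rand(1, 128).astype(np.float32)
feat_b = np.random.rand(1, 128).astype(np.float32)

# L2-normalize, then the dot product is the cosine similarity.
feat_a, feat_b = normalize(feat_a), normalize(feat_b)
cos_sim = float(feat_a @ feat_b.T)
print('cosine similarity:', cos_sim)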
Example No. 6
import math
from datetime import datetime

import mxnet as mx
import numpy as np

# get_model, get_all_training_list, shuffle_list, get_shuffled_regions,
# num_epoch and the local `test` module come from the surrounding project.


def train():
	fcscnn = get_model()
	fcscnn_test = test.get_model()
	raw_list_temp, fix_list_temp = get_all_training_list()
	learning_rate = 0.002
	momentum = 0.9
	max_grad_norm = 5000
	# optmzr = mx.optimizer.SGD(momentum = momentum, learning_rate = learning_rate, wd = 0.0002)
	optmzr = mx.optimizer.Adam(learning_rate=learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-07, decay_factor=0.99999999)
	# optmzr = mx.optimizer.RMSProp(gamma1=0.95, gamma2=0.9)
	# optmzr = mx.optimizer.SGLD(learning_rate = learning_rate, wd = 0.0002, rescale_grad = 1.0, clip_gradient = 10.0)
	# optmzr = mx.optimizer.AdaDelta(rho=0.9, epsilon=1e-05, wd = 0.0002)
	# optmzr = mx.optimizer.AdaGrad(eps=1e-07)
	# optmzr = mx.optimizer.ccSGD(momentum=0.9, rescale_grad=1.0, clip_gradient=-1.0)

	updater = mx.optimizer.get_updater(optmzr)

	arg_dict_params = mx.nd.load('.\\params\\saving\\arg_dict.params')
	aux_dict_params = mx.nd.load('.\\params\\saving\\aux_dict.params')

	for arg in fcscnn.arg_dict:
		for param in arg_dict_params:
			if arg == param:
				if fcscnn.arg_dict[arg].shape != arg_dict_params[param].shape:
					print(arg + ' is wrong')
					continue
				fcscnn.arg_dict[arg][:] = arg_dict_params[param].asnumpy()
				print('using old ' + arg)

	for aux in fcscnn.aux_dict:
		for param in aux_dict_params:
			if aux == param:
				if fcscnn.aux_dict[aux].shape != aux_dict_params[param].shape:
					print(aux + ' is wrong')
					continue
				fcscnn.aux_dict[aux][:] = aux_dict_params[param].asnumpy()
				print('using old ' + aux)

	# params = mx.nd.load('.\\params\\vgg19.params')
	# fcscnn.arg_dict['conv_lcl_0_weight'][:] = params['arg:conv1_1_weight'].asnumpy()
	# fcscnn.arg_dict['conv_lcl_1_weight'][:] = params['arg:conv1_2_weight'].asnumpy()
	# fcscnn.arg_dict['conv_lcl_2_weight'][:] = params['arg:conv2_1_weight'].asnumpy()
	# fcscnn.arg_dict['conv_lcl_3_weight'][:] = params['arg:conv2_2_weight'].asnumpy()
	# fcscnn.arg_dict['conv_lcl_4_weight'][:] = params['arg:conv3_1_weight'].asnumpy()
	# fcscnn.arg_dict['conv_lcl_5_weight'][:] = params['arg:conv3_2_weight'].asnumpy()
	# fcscnn.arg_dict['conv_lcl_6_weight'][:] = params['arg:conv3_3_weight'].asnumpy()
	# fcscnn.arg_dict['conv_lcl_7_weight'][:] = params['arg:conv3_4_weight'].asnumpy()
	# fcscnn.arg_dict['conv_glb_0_weight'][:] = params['arg:conv1_1_weight'].asnumpy()
	# fcscnn.arg_dict['conv_glb_1_weight'][:] = params['arg:conv1_2_weight'].asnumpy()

	for k in range(0, num_epoch):
		raw_list, fix_list, _ = shuffle_list(raw_list_temp, fix_list_temp)
		for i in range(0,int(1980/40)):
			raw_list_patch = raw_list[i*40 : (i+1)*40]  # 40 images (40*32 regions), split into 10 minibatches of 128 regions each
			fix_list_patch = fix_list[i*40 : (i+1)*40]  # the 40 matching fixation maps
			# lcl_region_list has 40*32=1280 regions
			lcl_region_list, glb_region_list, fix_region_list = get_shuffled_regions(raw_list_patch, fix_list_patch)

			print('this batch has ' + str(len(lcl_region_list)) + ' patches')
			if len(lcl_region_list) < 1280:
				print('skipping these 10 minibatches: not enough valid patches...')
				continue

			for j in range(0,10):
				batch_lcl = np.empty((128, 3, 48, 48))
				batch_glb = np.empty((128, 3, 48, 48))
				batch_fix = np.empty((128, 1, 8, 8))
				for idx in range(0,128):
					batch_lcl[idx] = lcl_region_list[j*128+idx]
					batch_glb[idx] = glb_region_list[j*128+idx]
					batch_fix[idx] = fix_region_list[j*128+idx]

				# print np.max(batch_lcl)
				# print np.min(batch_lcl)
				# print np.max(batch_glb)
				# print np.min(batch_glb)
				fcscnn.data_lcl[:] = batch_lcl
				fcscnn.data_glb[:] = batch_glb
				fcscnn.label[:] = batch_fix
				fcscnn.exc.forward(is_train = True)
				fcscnn.exc.backward()

				print('max: ' + str(np.max(fcscnn.exc.outputs[0].asnumpy())) + '...' + str(np.max(batch_fix)))
				print('mea: ' + str(np.mean(fcscnn.exc.outputs[0].asnumpy())) + '...' + str(np.mean(batch_fix)))
				print('min: ' + str(np.min(fcscnn.exc.outputs[0].asnumpy())) + '...' + str(np.min(batch_fix)))

				norm = 0
				for name in fcscnn.grd_dict:
					l2_norm = mx.nd.norm(fcscnn.grd_dict[name] / 128.0).asscalar()
					norm = norm + l2_norm * l2_norm
				norm = math.sqrt(norm)

				print(str(i) + 'th batch pack ended at ' + str(i*40+j*4) + 'th pic with grad norm: ' + str(norm) + ', epoch ' + str(k) + ', at ' + str(datetime.utcnow()))

				for idx in range(0, len(fcscnn.arg_name_list)):
					name = fcscnn.arg_name_list[idx]
					# fcscnn.grd_dict[name] /= 64.0
					if norm > max_grad_norm:
						fcscnn.grd_dict[name][:] = fcscnn.grd_dict[name] * (max_grad_norm / norm)
					updater(index = idx, weight = fcscnn.arg_dict[name], grad = fcscnn.grd_dict[name])#, state = fcscnn.aux_dict[name])
					fcscnn.grd_dict[name][:] = 0
				# ================================================================================
				mx.nd.save('.\\params\\saving\\arg_dict.params', fcscnn.arg_dict)
				mx.nd.save('.\\params\\saving\\grd_dict.params', fcscnn.grd_dict)
				mx.nd.save('.\\params\\saving\\aux_dict.params', fcscnn.aux_dict)

				# test ...
				if j%2 == 0:
					rand_idx = np.random.randint(0, 1980)
					raw_img = raw_list[rand_idx]
					fix_img = fix_list[rand_idx]
					print('testing on ' + str(i) + 'th img: ' + raw_img)
					test.test(raw_img, fix_img, fcscnn_test, k , i, j)
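The update loop above clips gradients by their global L2 norm before each parameter update. A minimal, self-contained NumPy sketch of that rule; the gradient dict and threshold below are placeholders:

import math
import numpy as np

def clip_by_global_norm(grads, max_norm):
    """Rescale a dict of gradient arrays so their joint L2 norm is <= max_norm."""
    global_norm = math.sqrt(sum(float(np.sum(g ** 2)) for g in grads.values()))
    if global_norm > max_norm:
        scale = max_norm / global_norm
        for name in grads:
            grads[name] = grads[name] * scale
    return grads

# Hypothetical usage with two toy gradient tensors.
grads = {'w': np.ones((3, 3)) * 100.0, 'b': np.ones(3) * 100.0}
clipped = clip_by_global_norm(grads, max_norm=5.0)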
Example No. 7
	return args

if __name__ == '__main__':
	""" python grad_cam.py <path_to_image>
	1. Loads an image with opencv.
	2. Preprocesses it for VGG19 and converts to a pytorch variable.
	3. Makes a forward pass to find the category index with the highest score,
	and computes intermediate activations.
	Makes the visualization. """

	#args = get_args()
	image_path = "imgs/women2.jpg"
	use_cuda = True
	opt = TestOptions().parse()

	models = test.get_model()
	# Can work with any model, but it assumes that the model has a 
	# feature method, and a classifier method,
	# as in the VGG models in torchvision.
	grad_cam = GradCam(model = models, \
					target_layer_names = ["7"], use_cuda=use_cuda)

	img = cv2.imread(image_path, 1)
	img = np.float32(cv2.resize(img, (128, 64))) / 255
	input = preprocess_image(img)

	# If None, returns the map for the highest scoring category.
	# Otherwise, targets the requested index.
	target_index = None

	mask = grad_cam(input, target_index)
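A minimal sketch of the usual follow-up step: blending the Grad-CAM mask onto the image with OpenCV, assuming the returned mask is a [0, 1] heatmap with the same height and width as the resized image; the helper name and output path are hypothetical:

import cv2
import numpy as np

def overlay_cam(img, mask, out_path='cam.jpg'):
    """Blend a [0, 1] heatmap onto a [0, 1] float BGR image and save it."""
    heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
    cam = np.float32(heatmap) / 255 + img
    cam = cam / np.max(cam)                     # rescale back into [0, 1]
    cv2.imwrite(out_path, np.uint8(255 * cam))

# Continuing from the snippet above (out_path is a hypothetical filename):
overlay_cam(img, mask)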
Example No. 8
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from test import get_model,get_mnist_test_data
import time

X_test = get_mnist_test_data()
model = get_model()

# Pick 25 random indices in [0, 28000)
random_idx = np.random.randint(0, 28000, size=25)

# Predict for the randomly selected 25 images
y_test = np.argmax(model.predict(X_test[random_idx],verbose=0),axis=1)

# A function to generate a grid of the 25 images with their corresponding predicted values
def generate_grid():
    plt.figure()
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.axis('off')
        plt.imshow(X_test[random_idx[i],0,:,:],cmap=cm.binary)
        plt.text(3,5,str(y_test[i]),fontsize=15,bbox={'alpha':0.3,'pad':9})
    plt.show()

# Display each image with its predicted value, pausing 5 seconds between them
for i in range(25):
    plt.figure()
    plt.axis('off')
    plt.imshow(X_test[random_idx[i], 0, :, :], cmap=cm.binary)
    plt.text(3, 4, str(y_test[i]), fontsize=40, bbox={'alpha': 0.5, 'pad': 10})
    plt.show()
    time.sleep(5)
Example No. 9
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from test import get_model, get_mnist_test_data
import time

X_test = get_mnist_test_data()
model = get_model()

# Pick 25 random indices in [0, 28000)
random_idx = np.random.randint(0, 28000, size=25)

# Predict for the randomly selected 25 images
y_test = np.argmax(model.predict(X_test[random_idx], verbose=0), axis=1)


# A function to generate a grid of the 25 images with their corresponding predicted values
def generate_grid():
    plt.figure()
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.axis('off')
        plt.imshow(X_test[random_idx[i], 0, :, :], cmap=cm.binary)
        plt.text(3,
                 5,
                 str(y_test[i]),
                 fontsize=15,
                 bbox={
                     'alpha': 0.3,
                     'pad': 9
                 })