def main(argv):
    """Train and evaluate the group-lasso DNN estimator on simulated data.

    Args:
        argv: full command-line vector; ``argv[1:]`` is forwarded to the
            module-level ``parser`` (expected to define ``batch_size`` and
            ``train_steps``).
    """
    flags = parser.parse_args(argv[1:])

    # Load the simulated train/test split.
    (train_x, train_y), (test_x, test_y) = simu_data.load_data()

    # One numeric feature column per input feature.
    feature_cols = [
        tf.feature_column.numeric_column(key=name) for name in train_x.keys()
    ]

    # Custom-model estimator: two hidden layers of 20 units, scalar output.
    classifier = tf.estimator.Estimator(
        model_fn=gLasso_model,
        params={
            'feature_columns': feature_cols,
            # Two hidden layers of 20 nodes each.
            'hidden_units': [20, 20],
            # The model output.
            'n_response': 1,
        })

    # Fit the model.
    classifier.train(
        input_fn=lambda: simu_data.train_input_fn(
            train_x, train_y, flags.batch_size),
        steps=flags.train_steps)

    # Evaluate on the held-out set.
    eval_result = classifier.evaluate(
        input_fn=lambda: simu_data.eval_input_fn(
            test_x, test_y, flags.batch_size))

    # Snapshot every trained variable by name.
    # NOTE(review): var_dict is not used after this loop — confirm whether it
    # was meant to be returned or inspected interactively.
    var_dict = dict()
    for var_name in classifier.get_variable_names():
        var_dict[var_name] = classifier.get_variable_value(var_name)

    print('\nTest set MSE: {MSE:0.3f}\n'.format(**eval_result))
# Re-fit the estimator with no hidden layer (hidden_units=[0]) and the
# regularization strength `reg`, then compare the learned input kernel
# against the ground-truth support.
# NOTE(review): `reg` and `p` are not defined in this chunk — presumably the
# loop variable of an enclosing sweep and the feature count; confirm against
# the surrounding code.
classifier = tf.estimator.Estimator(
    model_fn=gLasso_model,
    params={
        'feature_columns': my_feature_columns,
        'hidden_units': [0],
        'n_response': 1,
        'reg': reg,
    })
classifier.train(
    input_fn=lambda: simu_data.train_input_fn(train_x, train_y, 100),
    steps=10000)
eval_result = classifier.evaluate(
    input_fn=lambda: simu_data.eval_input_fn(test_x, test_y, 100))

# Pull every trained variable out of the estimator checkpoint by name.
var_dict = dict()
for var_name in classifier.get_variable_names():
    var_dict[var_name] = classifier.get_variable_value(var_name)

# Indicator vector marking features 0, 3 and 6 as the true support,
# reshaped to a 1 x p row for cosine_similarity.
v1 = np.zeros(p)
v1[0] = v1[3] = v1[6] = 1
v1 = v1.reshape((1, -1))

# Per-feature row norms of the first dense kernel (learned weight
# magnitude of each input feature), also as a 1 x p row.
v2 = np.linalg.norm(var_dict["dense/kernel"], axis=1).reshape(
    (1, -1))

# Scalar cosine similarity between true support and learned magnitudes.
cos_dis = cosine_similarity(v1, v2)
cos_dis = cos_dis[0][0]

# Spectral norm (largest singular value) of the kernel matrix.
spec_norm = np.linalg.norm(var_dict["dense/kernel"], 2)