def CUp(input, filter):
    structures = [
        ('CBA', {
            'filters': filter,
            'kernel_size': 1,
            'strides': 1,
            'padding': 'same',
            'use_bias': False,
            'activation': 'relu'
        }),
        ('TF', layers.UpSampling2D(2)),
    ]
    return build_model.build(input, structures, return_model=False)
def CBA5(input, filter):
    structures = [
        ('CBA', {
            'filters': filter // 2,
            'kernel_size': 1,
            'strides': 1,
            'padding': 'same',
            'use_bias': False,
            'activation': 'relu'
        }),
        ('CBA', {
            'filters': filter,
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'use_bias': False,
            'activation': 'relu'
        }),
        ('CBA', {
            'filters': filter // 2,
            'kernel_size': 1,
            'strides': 1,
            'padding': 'same',
            'use_bias': False,
            'activation': 'relu'
        }),
        ('CBA', {
            'filters': filter,
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'use_bias': False,
            'activation': 'relu'
        }),
        ('CBA', {
            'filters': filter // 2,
            'kernel_size': 1,
            'strides': 1,
            'padding': 'same',
            'use_bias': False,
            'activation': 'relu'
        }),
    ]
    return build_model.build(input, structures, return_model=False)[1]
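# The 'CBA' specs above are consumed by build_model.build, whose implementation is not
# shown here. As a rough sketch only (an assumption, not the repo's actual helper), a
# ('CBA', {...}) entry is presumably expanded into Conv2D -> BatchNormalization ->
# Activation, which is what the parameter dicts suggest:
from tensorflow.keras import layers as _layers


def cba_block_sketch(x, filters, kernel_size, strides=1, padding='same',
                     use_bias=False, activation='relu'):
    """Hypothetical expansion of a single ('CBA', {...}) spec."""
    x = _layers.Conv2D(filters, kernel_size, strides=strides,
                       padding=padding, use_bias=use_bias)(x)
    x = _layers.BatchNormalization()(x)
    x = _layers.Activation(activation)(x)
    return x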
def test_accuracy(self):
    try:
        from build_model import build
    except ImportError:
        self.fail(
            "No build method found. Please implement a build method that returns the ML model."
        )

    try:
        X_test, y_test = pickle.load(open("data/german_test.p", "rb"))
    except Exception:
        self.fail(
            "Test data file not found; it may have been moved or deleted. "
            "This approach will soon change so that test cases and test data "
            "are no longer part of the repo and cannot be changed or deleted by students."
        )

    creditClf = build()
    y_pred = creditClf.predict(X_test)
    accuracy = accuracy_score(y_test, y_pred)
    print("Your accuracy is " + str(accuracy))
    self.assertGreater(
        accuracy, 0.78,
        "Accuracy is not acceptable. You can do better with another approach.")
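# For reference, the test above only requires build() in build_model.py to return a
# fitted estimator exposing .predict. A minimal sketch is shown below; the training-data
# path "data/german_train.p" and the choice of GradientBoostingClassifier are
# illustrative assumptions, not the repository's actual solution, and no particular
# accuracy is implied.
import pickle

from sklearn.ensemble import GradientBoostingClassifier


def build():
    # Hypothetical training pickle with the same (X, y) layout as the test pickle.
    X_train, y_train = pickle.load(open("data/german_train.p", "rb"))
    clf = GradientBoostingClassifier(random_state=0)
    clf.fit(X_train, y_train)
    return clf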
def CBA2(_input, filter, class_num):
    # Detection head: one 3x3 CBA block followed by a 1x1 convolution that emits
    # 3 * (class_num + 5) channels, reshaped to (batch, H, W, 3 anchors, class_num + 5).
    input = layers.Input(_input.shape[1:])
    structures = [
        ('CBA', {
            'filters': filter,
            'kernel_size': 3,
            'strides': 1,
            'padding': 'same',
            'use_bias': False,
            'activation': 'relu'
        }),
        ('TF', layers.Conv2D(filters=3 * (class_num + 5),
                             kernel_size=1,
                             strides=1,
                             padding='same',
                             use_bias=True)),
    ]
    model = build_model.build(input, structures, return_model=True)
    output = model(input)
    output = layers.Lambda(
        lambda x: tf.reshape(
            x, (-1, tf.shape(x)[1], tf.shape(x)[2], 3, class_num + 5)))(output)
    return tf.keras.Model(input, output, name='output%d' % filter)
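# Quick shape check of the head layout above: the 1x1 convolution emits
# 3 * (class_num + 5) channels, which the Lambda reshapes into one
# (class_num + 5)-vector per anchor, with 3 anchors per grid cell.
# The values below are illustrative only.
import tensorflow as tf

class_num = 80                                     # e.g. COCO
raw = tf.zeros((1, 13, 13, 3 * (class_num + 5)))   # dummy head output for a 13x13 grid
grid = tf.reshape(raw, (-1, tf.shape(raw)[1], tf.shape(raw)[2], 3, class_num + 5))
print(grid.shape)                                  # (1, 13, 13, 3, 85)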
def Darknet53():
    # Stem plus residual stages down to stride 8 (256-channel feature map).
    structure_256 = [
        ('CBA', {'filters': 32, 'kernel_size': 3, 'strides': 1, 'padding': 'same',
                 'use_bias': False, 'activation': 'relu'}),
        ('CBA', {'filters': 64, 'kernel_size': 3, 'strides': 2, 'padding': 'same',
                 'use_bias': False, 'activation': 'relu'}),
        ('Dark_res', {'filters': 64, 'kernel_size': 3, 'strides': 1, 'padding': 'same',
                      'use_bias': False, 'activation': 'relu'}),
        ('CBA', {'filters': 128, 'kernel_size': 3, 'strides': 2, 'padding': 'same',
                 'use_bias': False, 'activation': 'relu'}),
        (('Dark_res', 2), {'filters': 128, 'kernel_size': 3, 'strides': 1, 'padding': 'same',
                           'use_bias': False, 'activation': 'relu'}),
        ('CBA', {'filters': 256, 'kernel_size': 3, 'strides': 2, 'padding': 'same',
                 'use_bias': False, 'activation': 'relu'}),
        (('Dark_res', 8), {'filters': 256, 'kernel_size': 3, 'strides': 1, 'padding': 'same',
                           'use_bias': False, 'activation': 'relu'}),
    ]
    input, output_256 = build_model.build(None, structure_256, return_model=False)

    # Downsample to stride 16 (512-channel feature map).
    structure_512 = [
        ('CBA', {'filters': 512, 'kernel_size': 3, 'strides': 2, 'padding': 'same',
                 'use_bias': False, 'activation': 'relu'}),
        (('Dark_res', 8), {'filters': 512, 'kernel_size': 3, 'strides': 1, 'padding': 'same',
                           'use_bias': False, 'activation': 'relu'}),
    ]
    _, output_512 = build_model.build(output_256, structure_512, return_model=False)

    # Downsample to stride 32 (1024-channel feature map).
    structure_1024 = [
        ('CBA', {'filters': 1024, 'kernel_size': 3, 'strides': 2, 'padding': 'same',
                 'use_bias': False, 'activation': 'relu'}),
        (('Dark_res', 4), {'filters': 1024, 'kernel_size': 3, 'strides': 1, 'padding': 'same',
                           'use_bias': False, 'activation': 'relu'}),
    ]
    _, output_1024 = build_model.build(output_512, structure_1024, return_model=False)

    model = keras.Model(input, (output_256, output_512, output_1024), name='Darknet53')
    return model
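# Usage sketch (assumes build_model resolves the 'CBA' and 'Dark_res' specs above):
# the three outputs are the usual Darknet-53 multi-scale feature maps at strides
# 8, 16 and 32 with 256, 512 and 1024 channels respectively, which the CUp / CBA5 /
# CBA2 helpers then combine into the detection heads.
#
#     backbone = Darknet53()
#     feat_256, feat_512, feat_1024 = backbone(images)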
import os


def create_subdirectory_if_not_exists(dir_name):
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
        print("Directory", dir_name, "created.")
    else:
        print("Directory", dir_name, "already exists.")


# Create output directory to store preprocessed data and trained model
create_subdirectory_if_not_exists("out")

# Define file locations
train_data = "out/preprocessed_train.npz"
test_data = "out/preprocessed_test.npz"
model_file = "out/model.h5"

if not os.path.isfile(train_data) or not os.path.isfile(test_data):
    # Preprocess data
    preprocess(train_data=train_data, test_data=test_data)
    print("Data preprocessed and saved locally.")
else:
    print("Preprocessed data exists.")

# Train model
build(train_data=train_data, save_file=model_file)

# Evaluate model
scores = evaluate(model_file, test_data)
print("Final scores:", scores)
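# The exact contents of the .npz files are determined by preprocess(), which is not
# shown here. Purely to illustrate the numpy archive format being used (the "X" and
# "y" key names below are hypothetical, not necessarily what preprocess() writes):
import os
import tempfile

import numpy as np

demo_path = os.path.join(tempfile.gettempdir(), "npz_format_demo.npz")
np.savez(demo_path, X=np.zeros((10, 4)), y=np.zeros(10))
with np.load(demo_path) as data:
    print(data["X"].shape, data["y"].shape)  # (10, 4) (10,)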
import cv2
import numpy as np

# Initialize webcam feed
video = cv2.VideoCapture(0)
ret = video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
ret = video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

# Sequence number for each saved image
count = 0
# Set to True once a target object has been detected
flagjer = False
# Delay counter so an image is not saved every second
c = 0

sess, category_index, detection_boxes, image_tensor, detection_scores, \
    detection_classes, num_detections = build_model.build()

while True:
    # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3],
    # i.e. a single-column array, where each item in the column has the pixel RGB value.
    ret, frame = video.read()
    frame_expanded = np.expand_dims(frame, axis=0)

    # Perform the actual detection by running the model with the image as input
    (boxes, scores, classes, num) = obj.inference1(sess, detection_boxes,
                                                   image_tensor, detection_scores,
                                                   detection_classes, num_detections,
                                                   frame_expanded)
    # if classes == 1:
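    # --- Sketch of a typical loop tail (not part of the original snippet) ---
    # Drawing the detection boxes would normally happen here; the display and exit
    # handling below are assumptions, not this repository's code.
    cv2.imshow('Object detector', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video.release()
cv2.destroyAllWindows()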
businesses = []
if sys.argv[1] == "-b":
    businesses = open(sys.argv[2]).read().split("\n")
elif len(sys.argv) == 2:
    businesses.append(sys.argv[1])
else:
    print("Bad arguments.")
    raise ValueError("Bad arguments. Should be '[bus_id]' or '-b [bus_file]'")

run_analytics = False

for bus_id in businesses:
    print("\n+----- YELP12 System Running -----+\n|")
    print("| BID:", bus_id)
    model_data = build_model.build(json_file, bus_id, run_analytics)
    if model_data:
        model_num = 0
        wout = open("output/OUTPUT_" + bus_id, "w+")
        wout.write("YELP12 OUTPUT FOR BUSINESS: " + bus_id + "\n\n")
        topics = []
        sentences = []
        reviews = []
        summaries = []
        i = 0