def main(nb_images=10):
    """Spot-check `nb_images` images."""
    data = DataSet()
    model = load_model('/data/d14122793/ucf101_full/checkpoints/inception.011-1.47.hdf5')

    # Get all our test images.
    images = glob.glob(os.path.join('/data/d14122793/ucf101_full', 'test', '**', '*.jpg'))

    for _ in range(nb_images):
        print('-'*80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        image = images[sample]

        # Turn the image into an array.
        print(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(), key=operator.itemgetter(1), reverse=True)
        
        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
def main(nb_images=5):
    """Spot-check `nb_images` images."""
    data = DataSet()
    model = load_model('data/checkpoints/inception.057-1.16.hdf5')

    # Get all our test images.
    images = glob.glob(os.path.join('data', 'test', '**', '*.jpg'))

    for _ in range(nb_images):
        print('-'*80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        image = images[sample]

        # Turn the image into an array.
        print(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(), key=operator.itemgetter(1), reverse=True)
        
        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
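Every example in this listing calls process_image from the project's processor module, but the helper itself is never shown. As a rough orientation only, here is a minimal sketch of what such a helper commonly does (load, resize, scale to [0, 1]); the real implementations may normalize differently or take extra arguments such as chanel_3d, type_0, or add_noise.

# Hypothetical sketch only -- not the actual processor.process_image of these projects.
from keras.preprocessing.image import img_to_array, load_img

def process_image(image_path, target_shape):
    """Load an image file, resize it to target_shape, and scale pixels to [0, 1]."""
    height, width, _ = target_shape
    image = load_img(image_path, target_size=(height, width))
    return img_to_array(image) / 255.0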
Example #3
    def build_image_sequence(self, frames, chanel_3d=False, type_0=0):
        """Given a set of frames (filenames), build our sequence."""

        result = [
            process_image(x, self.image_shape, chanel_3d, type_0)
            for x in frames
        ]
        # result = []
        # if type_0 in (0, 1, 2):
        #     result = [process_image(x, self.image_shape, chanel_3d, type_0) for x in frames]
        # elif type_0 >= 3:
        #     j = random.randint(0, 15)
        #     img = process_image(frames[j], self.image_shape, chanel_3d, 0)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     j = random.randint(0, 15)
        #     img = process_image(frames[j], self.image_shape, chanel_3d, 0)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     j = random.randint(0, 15)
        #     img = process_image(frames[j], self.image_shape, chanel_3d, 0)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)
        #     result.append(img)

        return result
Example #4
    def train_test_generator(self, train_test):
        """Return a train or a val generator that we can use to train on.
        Depending on train_test, reads from self.train_paths or self.test_paths.
        
        train_test: one of either 'train' or 'test'
        """
        data_paths = self.train_paths if train_test == 'train' else self.test_paths
        #need to extract outputs from data
        #convert (annotation_filepath, frame_filepath) -> (M, frame_filepath)
        #note: we're loading our y_truth vals into memory, but loading and preprocessing frames at generation time
        data = self.get_data_from_paths(data_paths)
        print("Creating %s generator with %d samples." %
              (train_test, len(data)))
        while 1:
            X, y = [], []
            # Generate batch_size samples.
            for _ in range(self.config['hyperparameters']['batch_size']):
                # Get a random sample.
                train_sample = random.choice(data)  # (M, frame)
                frame = process_image(train_sample[1],
                                      self.config['model']['target_shape'])
                #preprocess the frame

                inpt = frame  #for now (should preprocess further)

                output = train_sample[0]

                X.append(inpt)
                y.append(output)

            yield np.array(X), np.array(y)
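For context, a generator like train_test_generator above is typically handed straight to Keras. The sketch below is an assumed usage, not code from the project: the data instance, the model, and all step/epoch counts are placeholders.

# Hypothetical wiring of the generator above into Keras training; every name
# and number here is an assumption for illustration.
train_gen = data.train_test_generator('train')
val_gen = data.train_test_generator('test')
model.fit_generator(train_gen,
                    steps_per_epoch=100,
                    epochs=5,
                    validation_data=val_gen,
                    validation_steps=10)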
Example #5
    def get_data_for_sagemaker(self, train_test, model_input):
        """Return a train or a val data set that we can use to train with.
        Depending on train_test will read from self.train_paths or self.test_paths

        This function is used by SageMaker.
        
        train_test: one of either 'train' or 'test'

        return: np.array(data), np.array(labels)
        """
        # import tensorflow as tf

        data_paths = self.train_paths if train_test == 'train' else self.test_paths
        #need to extract outputs from data
        #convert (annotation_filepath, frame_filepath) -> (M, frame_filepath)
        data = self.get_data_from_paths(data_paths)
        outputs = []
        inputs = []

        for _ in range(self.config['hyperparameters']['batch_size']):
            # Get a random sample.
            train_sample = random.choice(data)  # (M, frame)
            frame = process_image(train_sample[1],
                                  self.config['model']['target_shape'])
            #preprocess the frame

            label = train_sample[0]

            inputs.append(frame)
            outputs.append(label)

        return np.array(inputs, dtype=np.float32), np.array(outputs,
                                                            dtype=np.float32)
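A hypothetical call to get_data_for_sagemaker above; note that model_input is accepted but unused in the body, so passing None here is purely an assumption, as is the data instance.

# Hypothetical usage; `data` and model_input=None are assumptions.
x_train, y_train = data.get_data_for_sagemaker('train', model_input=None)
x_val, y_val = data.get_data_for_sagemaker('test', model_input=None)
print(x_train.shape, x_train.dtype, y_train.shape)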
Example #6
def main():
    model = load_model('inception.026-1.07.hdf5')  # replace with your model name
    # Get all our test images.
    image = 'r.jpg'
    images = cv2.imread('r.jpg')
    #cv2.imshow("Image", images)
    #cv2.waitKey(0)
    # Turn the image into an array.
    image_arr = process_image(image, (299, 299, 3))
    image_arr = np.expand_dims(image_arr, axis=0)

    layer_1 = K.function([model.layers[0].input], [model.layers[1].output])
    f1 = layer_1([image_arr])[0]
    for _ in range(32):
        show_img = f1[:, :, :, _]
        show_img.shape = [149, 149]
        plt.subplot(4, 8, _ + 1)
        plt.imshow(show_img, cmap='gray')
        plt.axis('off')
    plt.show()
    # conv layer: 299
    layer_1 = K.function([model.layers[0].input], [model.layers[299].output])
    f1 = layer_1([image_arr])[0]
    for _ in range(81):
        show_img = f1[:, :, :, _]
        show_img.shape = [8, 8]
        plt.subplot(9, 9, _ + 1)
        plt.imshow(show_img, cmap='gray')
        plt.axis('off')

    plt.show()
    print('This is the end !')
def generate_heatmaps(images, model):
    target_size = get_target_size(model)

    for image_file in images:

        # process input
        x = process_image(image_file, target_size + (3, ))
        x = np.expand_dims(x, axis=0)
        #x = my_crop_function(x)

        # predict and get output
        preds = model.predict(x)
        index = np.argmax(preds[0])
        print(preds[0])
        data = DataSet()
        print('predict class:{}'.format(index))
        print('predict class:', data.classes[index])
        max_output = model.output[:, index]

        heatmap_file = get_heatmap_file(image_file,
                                        predict_class=data.classes[index])

        # detect last conv layer
        last_conv_index = detect_last_conv(model)
        last_conv_layer = model.layers[last_conv_index]
        # get gradient of the last conv layer to the predicted class
        grads = K.gradients(max_output, last_conv_layer.output)[0]
        # pooling to get the feature gradient
        pooled_grads = K.mean(grads, axis=(0, 1, 2))
        # run the predict to get value
        iterate = K.function([model.input],
                             [pooled_grads, last_conv_layer.output[0]])
        pooled_grads_value, conv_layer_output_value = iterate([x])

        # weight each channel of the conv feature map by its pooled gradient
        for i in range(pooled_grads_value.shape[0]):
            conv_layer_output_value[:, :, i] *= pooled_grads_value[i]

        # average over the channels to get the heatmap
        heatmap = np.mean(conv_layer_output_value, axis=-1)
        # normalize heatmap to 0~1
        heatmap = np.maximum(heatmap, 0)
        heatmap /= np.max(heatmap)
        #plt.matshow(heatmap)
        #plt.show()

        # overlay the heatmap on the frame image
        img = cv2.imread(image_file)
        #img = my_crop_function(img)
        heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
        heatmap = np.uint8(255 * heatmap)
        heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
        superimposed_img = heatmap * 0.4 + img

        # save the overlaid image
        cv2.imwrite(heatmap_file, superimposed_img)
        print("generated heatmap file", heatmap_file)
Example #8
def detect_objects(data, model, image):
    action = None
    for _ in range(1):
        images = glob.glob(os.path.join('images', '*.jpg'))
        #print('-'*80)
        # Get a random row.
        sample = 0
        #print(sample)
        #print(len(images))
        image = images[sample]
        print(image)
        # Turn the image into an array.

        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)
        #print(predictions)
        # Show how much we think it's each one.
        #label_predictions = {}
        label_predictions = predictions[0]
        #run, sit, stand = label_predictions.partition(' ')
        # keep the class probabilities as floats (int() would truncate them to 0)
        run = label_predictions[0]
        print(run)
        sit = label_predictions[1]
        print(sit)
        stand = label_predictions[2]
        print(stand)
        #for m, label in enumerate(data.classes):
        #    label_predictions[label] = predictions[0][m]

        #sorted_lps = sorted(label_predictions.items(), key=operator.itemgetter(1), reverse=True)

        #for n, class_prediction in enumerate(sorted_lps):
        # Just get the top five.
        #if i > 0:
        # break

        #print(image)
        #print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
        #action = class_prediction[0]
        action = None
        if (run > sit):
            #action = stand
            if (run > stand):
                action = stand
            else:
                action = stand
        elif (run < sit):
            if (sit > stand):
                action = sit
            else:
                action = stand
        print("action=", action)
    return action
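The nested comparisons above choose an action from the three class probabilities (run, sit, stand). If the intent is simply "take the most probable class", an argmax expresses that more directly; the class order below is assumed from the example.

# Hypothetical equivalent of the run/sit/stand comparisons: take the argmax.
import numpy as np

def pick_action(probs, classes=('run', 'sit', 'stand')):
    """Return the class name with the highest predicted probability."""
    return classes[int(np.argmax(probs))]

# e.g. pick_action(predictions[0])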
Example #9
def main():

    lips = "../diagnostic_uploads/herpes4.png"
    tongue_ulcer = "../diagnostic_uploads/syphilis2.png"
    tongue_patch = "../diagnostic_uploads/syphilis3.png"
    gums = "../diagnostic_uploads/cancer1.png"

    files = [lips, tongue_ulcer, tongue_patch, gums]

    print(process_image(files))
Example #10
def upload():
    try:
        data = request.data
        data = data.decode('utf8')
        data = json.loads(data)
        image = base64.b64decode(data['image'])
        result = process_image(image)
        return make_response(jsonify(result), 200)
    except Exception as err:
        logging.error('An error has occurred whilst processing the file: "{0}"'.format(err))
        abort(400)
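A hypothetical client for the upload endpoint above, matching the JSON shape it expects (a base64-encoded image under the image key); the URL and file name are assumptions.

# Hypothetical client call; the endpoint URL and input file are assumptions.
import base64
import json

import requests

with open('frame.jpg', 'rb') as f:
    payload = {'image': base64.b64encode(f.read()).decode('ascii')}

resp = requests.post('http://localhost:5000/upload', data=json.dumps(payload))
print(resp.status_code, resp.json())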
def generate_heatmap(image_file, model_file, heatmap_file):
    # load model, MobileNetV2 by default
    if model_file is None:
        model = MobileNetV2(weights='imagenet')
    else:
        model = load_model(model_file)
    model.summary()

    # process input
    x = process_image(image_file, get_target_size(model) + (3, ))
    x = np.expand_dims(x, axis=0)

    # predict and get output
    preds = model.predict(x)
    index = np.argmax(preds[0])
    print(preds[0])
    print('predict index: {}'.format(index))
    max_output = model.output[:, index]
    # detect last conv layer
    last_conv_index = detect_last_conv(model)
    last_conv_layer = model.layers[last_conv_index]
    # get gradient of the last conv layer to the predicted class
    grads = K.gradients(max_output, last_conv_layer.output)[0]
    # pooling to get the feature gradient
    pooled_grads = K.mean(grads, axis=(0, 1, 2))
    # run the predict to get value
    iterate = K.function([model.input],
                         [pooled_grads, last_conv_layer.output[0]])
    pooled_grads_value, conv_layer_output_value = iterate([x])

    # weight each channel of the conv feature map by its pooled gradient
    for i in range(pooled_grads_value.shape[0]):
        conv_layer_output_value[:, :, i] *= pooled_grads_value[i]

    # average over the channels to get the heatmap
    heatmap = np.mean(conv_layer_output_value, axis=-1)
    # normalize heatmap to 0~1
    heatmap = np.maximum(heatmap, 0)
    heatmap /= np.max(heatmap)
    #plt.matshow(heatmap)
    #plt.show()

    # overlay the heatmap on the frame image
    img = cv2.imread(image_file)
    heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
    heatmap = np.uint8(255 * heatmap)
    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
    superimposed_img = heatmap * 0.4 + img

    # save the overlaid image
    cv2.imwrite(heatmap_file, superimposed_img)
    print("generated heatmap file", heatmap_file)
Example #12
    def test_generator(self):
        '''
        Generates only input data, no validation
        '''

        for path in self.test_paths:
            try:
                single_frame_batch = np.array([
                    process_image(path, self.config['model']['target_shape'])
                ])
            except IOError as e:
                continue
            yield single_frame_batch
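A hypothetical way to run inference with the test_generator above; data and model are assumed to exist in the surrounding project.

# Hypothetical inference loop over the generator above; names are assumptions.
for single_frame_batch in data.test_generator():
    preds = model.predict(single_frame_batch)
    print(preds[0].argmax())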
def main():
    """Spot-check `nb_images` images."""
    data = DataSet()
    model = load_model(
        'data/checkpoints/method1actionmix.inception.017-0.07.hdf5')

    # Get all our test images.
    images = glob.glob(os.path.join('actions', '*.jpg'))
    nb_images = len(images)
    j = 0
    for _ in range(nb_images):
        print('-' * 80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        image = images[sample]

        # Turn the image into an array.
        print(image)
        fol1, fn1, t1 = image.partition('/')
        fol2, fn2, t2 = t1.partition('_')
        print(fol2)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)
        #print(predictions)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)

        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top prediction.
            if i > 0:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
            if (class_prediction[0] == fol2):
                print("true")
                j += 1
                print(j)
            else:
                print("false")
            print(j)
    print(j)
Example #14
def main():
    """Spot-check `nb_images` images."""
    #data = DataSet()
    classes = [
        'Anger', 'Anticipation', 'Disgust', 'Fear', 'Joy', 'Sadness',
        'Surprise', 'Trust'
    ]
    model = load_model(
        '/home/ubuntu/workspace/emai/data/checkpoints/inception-007-0.43.hdf5')
    #model = load_model('./data/checkpoints/inception.039-1.69.hdf5')
    #model = load_model('./data/checkpoints/inception.002-1.95.hdf5')
    likelihood = []
    probability = []
    # Get all our test images.
    images = glob.glob(
        '/home/ubuntu/workspace/emai/data/validation_data/*.jpg')
    for image in images:
        #for _ in range(nb_images):
        #print('-'*80)
        # Get a random row.
        #sample = random.randint(0, len(images) - 1)
        #image = images[sample]

        # Turn the image into an array.
        #print(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)
        #import pdb; pdb.set_trace()
        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)

        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top prediction.
            #import pdb;pdb.set_trace()
            if i >= 1:
                break
            likelihood.append(class_prediction[0])
            probability.append((class_prediction[0], class_prediction[1]))
            #print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
            #i += 1
    print(Counter(likelihood))
    return Counter(likelihood)
Example #15
def validate_cnn_model(model_file):
    data = DataSet()
    model = load_model(model_file)

    # Get all our test images.
    images = glob.glob(os.path.join('data', 'test_full', '**', '*.jpg'))

    # Count the correct predict
    result_count = 0

    for image in images:
        print('-' * 80)
        # Get a random row.
        #sample = random.randint(0, len(images) - 1)
        #image = images[sample]

        # Get groundtruth class string
        class_str = image.split(os.path.sep)[-2]

        # Turn the image into an array.
        print(image)
        image_arr = process_image(image, (224, 224, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)

        # Get top-1 predict class as result
        predict_class_str = sorted_lps[0][0]
        if predict_class_str == class_str:
            result_count = result_count + 1

        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))

    print("\nval_acc: %f" % (result_count / float(len(images))))
Example #16
def _input_fn(training_dir, data_file, config, batch_size):
    with open(data_file, 'r') as f:
        reader = csv.reader(f)
        rows = [x for x in reader]
        frame_files = [os.path.join(training_dir, x[0]) for x in rows]
        labels = [np.array(x[1:], dtype=np.float32) for x in rows]
        zip_files_labels = list(zip(frame_files, labels))  # list() so it can be shuffled and sliced
        shuffle(zip_files_labels)
        frame_files = [x[0] for x in zip_files_labels[:batch_size]]
        labels = np.array([x[1] for x in zip_files_labels[:batch_size]])

    # print labels
    features = np.array([
        process_image(f, config['model']['target_shape']) for f in frame_files
    ])
    return features, labels
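For reference, a hypothetical call to _input_fn above from a SageMaker training script; the channel path follows the usual /opt/ml/input/data/<channel> layout, and the CSV name, config, and batch size are assumptions.

# Hypothetical invocation of _input_fn; the path, file name, and values are assumptions.
features, labels = _input_fn('/opt/ml/input/data/training',
                             'train_frames.csv',
                             config,
                             batch_size=32)
print(features.shape, labels.shape)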
def main(nb_images=5):
    """Spot-check `nb_images` images."""
    data = DataSet()

    # load the trained model that has been saved in CNN_train_UCF101.py
    checkpoint = sorted(
        os.listdir('data/checkpoints/'))[-1]  # get the last checkpoint
    filename = os.path.join('data/checkpoints/', checkpoint)
    model = load_model(filename)

    # Get all our test images.
    images = glob.glob('./data/test/**/*.jpg')

    for _ in range(nb_images):
        print('-' * 80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        image = images[sample]

        # Turn the image into an array.
        print(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)

        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
Example #18
def main(nb_images=5):
    """Spot-check `nb_images` images."""
    data = DataSet()
    model = load_model('./data/checkpoints/inception.0026-2.54.hdf5'
                       )  # replace with your model name

    # Get all our test images.
    images = glob.glob(root_dir + 'test/*/*.jpg')

    for _ in range(nb_images):
        print('-' * 80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        image = images[sample]

        # Turn the image into an array.
        print(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)
        # print(sorted_lps)
        # [('walk', 0.8944378), ('turn', 0.04855361), ('stand', 0.040282194), ('wave', 0.009945527),
        #  ('talk', 0.006243166), ('smile', 0.00053773355)]
        # print("*" * 20)
        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
def detect(image,model,data):
    # Turn the image into an array.
    image_arr = process_image(image, (299, 299, 3))
    image_arr = np.expand_dims(image_arr, axis=0)
    # Predict.
    predictions = model.predict(image_arr)
    # Show how much we think it's each one.
    label_predictions = {}
    for i, label in enumerate(data.classes):
        print(i,label)
        label_predictions[label] = predictions[0][i]

    sorted_lps = sorted(label_predictions.items(), key=operator.itemgetter(1), reverse=True)

    for i, class_prediction in enumerate(sorted_lps):
        # Just get the top prediction.
        if i > 0:
            break
        a = class_prediction[0]
        b = class_prediction[1]
    return a, b
Example #20
def add_scores():
    from processor import process_image
    from keras.models import load_model
    global files
    global files_with_scores

    max_mtime = 0
    model_files = glob.glob(os.path.join(modelPath, '*.hdf5'))
    for fname in model_files:
        mtime = os.stat(fname).st_mtime
        if mtime > max_mtime:
            max_mtime = mtime
            max_file = fname
    model = load_model(max_file)

    while len(files):
        current_file = files.pop()

        classification = ""
        for c in classes:
            if current_file.find(c) >= 0:
                classification = c
                break

        image_arr = process_image(current_file, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)
        predictions = model.predict(image_arr)
        prediction = predictions[0]
        if not classification == "":
            score = prediction[classes.index(classification)]
        else:
            score = np.amax(prediction)
            # numpy arrays have no .index(); take the argmax and map it to a class name
            classification = classes[int(np.argmax(prediction))]

        files_with_scores.append(PictureInfo(
            current_file, classification, score))

        files_with_scores.sort(reverse=True)
def main(nb_images=5):
    """Spot-check `nb_images` images."""
    data = DataSet()
    #Load our model
    model = load_model('model.py')

    # Get all our test images.
    images = glob.glob('./data/test/**/*.jpg')

    for _ in range(nb_images):
        print('-' * 80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        image = images[sample]

        # Turn the image into an array.
        print(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(data.classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)

        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
Example #22
def generate_featuremap(image_file, model_file, featuremap_path,
                        model_image_size):
    model = load_model(model_file)
    model.summary()
    image_shape = get_target_size(
        model) if model_image_size is None else model_image_size
    image_arr = process_image(image_file, image_shape + (3, ))
    image_arr = np.expand_dims(image_arr, axis=0)

    # Create featuremap dir
    touchdir(featuremap_path)

    for conv_layer in detect_conv_layers(model):
        # Get conv layer output
        layer_func = K.function([model.layers[0].input],
                                [model.layers[conv_layer].output])
        layer_output = layer_func([image_arr])[0]
        # Arrange featuremap on one pic
        height, width = get_subplot_size(layer_output.shape[-1])
        rows, cols, channels = get_featuremap_shape(layer_output)

        for _ in range(layer_output.shape[-1]):
            show_img = layer_output[:, :, :, _]
            show_img.shape = [rows, cols]
            plt.subplot(height, width, _ + 1)
            plt.imshow(show_img)
            plt.axis('off')

        # Store the featuremap pic
        file_name = 'featuremap_layer{}_{}_{}_{}.jpg'.format(
            conv_layer, rows, cols, channels)
        file_name = os.path.join(featuremap_path, file_name)
        print('save feature map', file_name)
        plt.savefig(file_name, dpi=100, quality=95)
        plt.show()

    print('feature map extract done')
Example #23
def build_image_sequence(frames):
    """Given a set of frames build the sequence."""
    return [process_image(x, (256,293,3)) for x in frames]
Example #24
def main(nb_images=5):
    """Spot-check `nb_images` images."""
    data = DataSet()
    model = load_model('3motion.hdf5')

    # Get all our test images.
    images = glob.glob(os.path.join('images', '*.jpg'))
    nb_images = len(images)
    for _ in range(nb_images):
        #print('-'*80)
        # Get a random row.
        sample = random.randint(0, len(images) - 1)
        #print(sample)
        #print(len(images))
        image = images[sample]

        # Turn the image into an array.

        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        cars = [1, 2, 3]
        for i, label in enumerate(cars):
            label_predictions[label] = predictions[0][i]
        #print(label_predictions)
        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)

        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top prediction.
            if i > 0:
                break

            #print(image)
            fol, fn1, t1 = image.partition('/')
            #print(fol)
            #print(t1)
            #print(fn1)
            fn, do, t2 = t1.partition('.')
            #print(fn)
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
            conn = psycopg2.connect(database="dateandtime",
                                    user="******",
                                    password="******",
                                    host="127.0.0.1",
                                    port="5432")

            #print ("Opened database successfully")

            cur = conn.cursor()

            #cur.execute("SELECT id, fn, mdy, dy, tm, zn, ps, ac from db1")

            #cur.execute("INSERT INTO DATE_TIME (ID,MDY,DY,TM) VALUES (1,aa,bb,cc)")
            cur.execute("UPDATE db11 SET ac = %s WHERE fn = %s",
                        (class_prediction[0], fn))

            conn.commit()
            #print ("Records created successfully")
            conn.close()
Example #25
def main(nb_images=5):
    """Spot-check `nb_images` images."""
    # Get all our test images.
    images = glob.glob(os.path.join(srcPath, '*.jpg'))
    images = images + glob.glob(os.path.join(srcPath, '**', '*.jpg'))
    #random.shuffle(images)
    images.sort(reverse=True)
    print("found %d images." % len(images))

    # Load the most recent model
    max_mtime = 0
    files = glob.glob(os.path.join(modelPath, '*.hdf5'))
    for fname in files:
        mtime = os.stat(fname).st_mtime
        if mtime > max_mtime:
            max_mtime = mtime
            max_file = fname
    model = load_model(max_file)

    for image in images:
        print('-' * 80)
        print(image)

        # Turn the image into an array.
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = model.predict(image_arr)

        # Show how much we think it's each one.
        label_predictions = {}
        for i, label in enumerate(classes):
            label_predictions[label] = predictions[0][i]

        sorted_lps = sorted(label_predictions.items(),
                            key=operator.itemgetter(1),
                            reverse=True)

        for i, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            if i > 4:
                break
            print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
        # sort the files into folders by category
        filename = os.path.split(image)[1]
        if (sorted_lps[0][1] > 0.80):
            name, extention = filename.split('.')
            dest = os.path.join(
                dstPath, sorted_lps[0][0], name +
                '{:2d}'.format(int(sorted_lps[0][1] * 100)) + '.' + extention)
            if (os.path.isfile(dest)):
                os.remove(dest)
            os.rename(image, dest)
        else:
            fname, extention = filename.split(".")
            filename = fname + "_" + \
                sorted_lps[0][0] + "_" + sorted_lps[1][0] + "." + extention
            dest = os.path.join(dstPath, "unsure", filename)
            if (os.path.isfile(dest)):
                os.remove(dest)
            os.rename(image, dest)
    def build_image_sequence(self, frames):
        """Given a set of frames (filenames), build our sequence."""
        return [process_image(x, self.image_shape) for x in frames]
Example #27
    def build_image_sequence(self, frames, add_noise):
        """Given a set of frames (filenames), build our sequence."""
        bool_addnoise = add_noise
        return [process_image(frames, self.image_shape, bool_addnoise)]
Example #28
    def build_image_sequence(self, frames):
        """Given a set of frames (filenames), build our sequence."""
        return [process_image(x, self.image_shape) for x in frames]
def detect_objects(data, model, image, datafaces1):
    action = None
    for _ in range(1):
        images = glob.glob(os.path.join('images', '*.jpg'))
        #print('-'*80)
        # Get a random row.
        sample = 0
        #print(sample)
        #print(len(images))
        image = images[sample]
        print(image)
        # Turn the image into an array.
        image2 = cv2.imread(image)
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = None
        predictions = model.predict(image_arr)
        #print(predictions)
        # Show how much we think it's each one.
        #label_predictions = {}
        label_predictions = predictions[0]
        #run, sit, stand = label_predictions.partition(' ')
        run = label_predictions[0]
        print(run)
        sit = label_predictions[1]
        print(sit)
        stand = label_predictions[2]
        print(stand)
        #for m, label in enumerate(data.classes):
        #    label_predictions[label] = predictions[0][m]

        #sorted_lps = sorted(label_predictions.items(), key=operator.itemgetter(1), reverse=True)

        #for n, class_prediction in enumerate(sorted_lps):
        # Just get the top five.
        #if i > 0:
        # break

        #print(image)
        #print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
        #action = class_prediction[0]
        if (run > sit):
            #action = stand
            if (run > stand):
                action = "stand"
            else:
                action = "stand"
        elif (run < sit):
            if (sit > stand):
                action = "sit"
            else:
                action = "stand"
        print("aciton=%s", action)

        person = None
        imagepathhh = str(image)
        #print(imagepathhh)
        imagepathh = imagepathhh.partition('/')
        savepath = 'images'
        os.chdir(savepath)
        print(imagepathh)
        imageface = cv2.imread(imagepathh[2])
        os.chdir('..')
        rgb = cv2.cvtColor(imageface, cv2.COLOR_BGR2RGB)

        # detect the (x, y)-coordinates of the bounding boxes corresponding
        # to each face in the input image, then compute the facial embeddings
        # for each face
        print("[INFO] recognizing faces...")
        boxes = face_recognition.face_locations(rgb, model='cnn')
        encodings = face_recognition.face_encodings(rgb, boxes)

        # initialize the list of names for each face detected
        names = []

        # loop over the facial embeddings
        for encoding in encodings:
            # attempt to match each face in the input image to our known
            # encodings
            matches = face_recognition.compare_faces(data["encodings"],
                                                     encoding)
            name = "Unknown"

            # check to see if we have found a match
            if True in matches:
                # find the indexes of all matched faces then initialize a
                # dictionary to count the total number of times each face
                # was matched
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}

                # loop over the matched indexes and maintain a count for
                # each recognized face
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1

                # determine the recognized face with the largest number of
                # votes (note: in the event of an unlikely tie Python will
                # select first entry in the dictionary)
                name = max(counts, key=counts.get)

            # update the list of names
            names.append(name)
            person = name

        # loop over the recognized faces
        #for ((top, right, bottom, left), name) in zip(boxes, names):
        # draw the predicted face name on the image
        #cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
        #y = top - 15 if top - 15 > 15 else top + 15
        #cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
        # 0.75, (0, 255, 0), 2)

    return action, person
Example #30
def uploaded_file(filename):
    res = process_image(filename)
    return render_template('processing.html', file_name=filename)
Example #31
    def build_image_sequence(self, frames):
        return [process_image(x, self.image_shape) for x in frames]
Example #32
def detect_objects(data, model, image):
    action=None
    for _ in range(1):
        images = glob.glob(os.path.join('images','*.jpg'))
        #print('-'*80)
        # Get a random row.
        sample = 0
        #print(sample)
        #print(len(images))
        image = images[sample]
        print(image)
        imagepathh = image.partition('/')
        savepath = 'images'
        os.chdir(savepath)
        #print(imagepathh)
        #imageface=cv2.imread(imagepathh[2])
        # Turn the image into an array.
        if os.path.getsize(imagepathh[2]):
            # Execute!
            print('image found')
        else:
            os.remove(imagepathh[2])
            image = images[2]
        os.chdir('..')
        image_arr = process_image(image, (299, 299, 3))
        image_arr = np.expand_dims(image_arr, axis=0)

        # Predict.
        predictions = None
        predictions = model.predict(image_arr)
        #print(predictions)
        # Show how much we think it's each one.
        #label_predictions = {}
        label_predictions = predictions[0]
        #run, sit, stand = label_predictions.partition(' ')
        run = label_predictions[0]
        print(run)
        sit = label_predictions[1]
        print(sit)
        stand = label_predictions[2] 
        print(stand)
        #for m, label in enumerate(data.classes):
        #    label_predictions[label] = predictions[0][m]

        #sorted_lps = sorted(label_predictions.items(), key=operator.itemgetter(1), reverse=True)
        
        #for n, class_prediction in enumerate(sorted_lps):
            # Just get the top five.
            #if i > 0:
               # break

            #print(image)
            #print("%s: %.2f" % (class_prediction[0], class_prediction[1]))
            #action = class_prediction[0]
        if (run > sit):
            #action = stand
            if (run > 0.5):
                action = "sitting"
            else:
                action = "sitting"
        elif (run < sit):
            if (sit > stand):
                action = "sitting"
            else:
                action = "standing"
        print("aciton=%s",action)
        framescan1 = timetime+'_'+str(i)+'.jpg'
        os.chdir(savepath)
        try:
            os.remove(framescan1)
        except OSError:
            pass
        os.chdir('..')

    return action