Code Example #1
import cv2
import torch
from detecto.core import Dataset, Model
from detecto.visualize import show_labeled_image

# base_path and sample_image are assumed to be defined elsewhere in the script
img = cv2.imread(base_path + sample_image)
# Earlier viewing attempts, kept from the original:
# cv2.imshow('img', img)
# cv2.waitKey(0)
# image = read_image(base_path + sample_image); plt.imshow(image); plt.show()

dataset = Dataset(base_path)
img, targets = dataset[10]
show_labeled_image(img, targets['boxes'], targets['labels'])

labels = ['spiderman', 'venom']
model = Model(labels)
model.fit(dataset)
# Note: torch.save pickles the entire Model object, so it must be reloaded
# with torch.load; detecto's own format would be model.save('model.pth').
torch.save(model, 'model.pth')

# directory = r'C:\Users\kapsi\Desktop\WdPO_P\test'
# file_count = sum(len(files) for _, _, files in os.walk(directory))
# if file_count == 0:
#    print("There's nothing to show! Closing...")
#
# else:
#    pics = list()
#    for i in range(file_count):
#       path = os.path.join(directory, '{}.jpg'.format(i))
#       img_test = cv2.imread(path)
#       img_test = cv2.cvtColor(img_test, cv2.COLOR_BGR2RGB)
#       pics.append(img_test)
Code Example #2
from sys import argv
import json
#from DetectDigits import get_output_image
import cv2
import os
import ocr

from detecto.core import Model
from detecto.utils import read_image

model = Model.load('localization_model.pth', ['digits'])

path = argv[1]
image_name = argv[2]
filename = os.path.join(path, image_name)
data = {}

#cascade = cv2.CascadeClassifier(path + "\\Cascades\\cascade.xml")
#img = cv2.imread(filename)
img = read_image(filename)
top_preds = model.predict_top(img)
# predict_top returns (labels, boxes, scores); take the top box as [xmin, ymin, xmax, ymax]
res = top_preds[1].numpy()[0]
#numbers = cascade.detectMultiScale(img, 1.1, 3)

# Convert [xmin, ymin, xmax, ymax] to x, y, width, height and crop the digit region
x, y = int(res[0]), int(res[1])
w = int(res[2]) - x
h = int(res[3]) - y
img = img[y:y + h, x:x + w]
temp_path = "res.png"
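
The snippet is cut off here. A minimal sketch of a plausible continuation, assuming the imported local ocr module exposes a recognize(path) helper (hypothetical; not shown in the source):

# Hypothetical continuation: save the crop and run OCR on it
cv2.imwrite(temp_path, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))  # read_image returns RGB; OpenCV writes BGR
data['digits'] = ocr.recognize(temp_path)  # hypothetical helper from the local ocr module
print(json.dumps(data))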
Code Example #3
# findFlag.py

from detecto.core import Model
import cv2  # Used for loading the image into memory

# First, let's load our trained model from the Training section
# We need to specify the label which we want to find (the same one from Classification and Training)
model = Model.load('model.pth', ['aboriginal_flag'])

# Now, let's load a sample image into memory
# Change the file name below if you want to test other potential samples
image = cv2.imread("samples/sample.jpg")

# model.predict() is the method we call with our image as an argument
# to try to find our desired object in the sample image using our pre-trained model.
# It will do a bit of processing and then spit back some numbers.
# The numbers define what it thinks the bounding boxes are of potential matches,
# along with the probability that each bounding box contains the object (flag).
labels, boxes, scores = model.predict(image)

# Below we just print the results. predict() gives back arrays that represent
# the bounding box coordinates and the probability that the model believes
# each box is a match.
# The coordinates are (xMin, yMin, xMax, yMax).
# Using this data, you could open the original image in an image editor
# and draw a box around the printed coordinates.
print(labels, boxes, scores)

# WARNING: You don't have to understand this part, I barely do.
# All this code does is draw rectangles around the model predictions above
# and outputs to the display for your viewing pleasure.
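
The rectangle-drawing code itself is truncated in the source; a minimal sketch of what it likely does, using only standard OpenCV calls (the 0.5 confidence cutoff is an assumption):

# Draw each sufficiently confident prediction onto the image
for i in range(len(scores)):
    if scores[i] < 0.5:  # assumed cutoff; the original threshold is not shown
        continue
    x_min, y_min, x_max, y_max = map(int, boxes[i])
    cv2.rectangle(image, (x_min, y_min), (x_max, y_max), (0, 0, 255), 2)
    cv2.putText(image, '{}: {:.2f}'.format(labels[i], float(scores[i])),
                (x_min, y_min - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
cv2.imshow('result', image)
cv2.waitKey(0)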
Code Example #4
# Imports used by this function (gathered from the surrounding project file);
# stringed() is a project-local helper defined elsewhere.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pyart
from datetime import datetime, timedelta
from detecto.core import Model


def detect(radar, img, file, locDat, sweep, detdir, vis, cint):
    model = Model.load('RASRmodl.pth', ['fall'])
    pred = model.predict(img)
    #print(max(pred[2]))
    for n in range(len(pred[1])):
        if pred[2][n] > cint:  # keep only detections above the confidence threshold
            bound = 0.5  # The unmapped location data is about 2x as far as it should be;
            # it is unclear why, so this scaling is a temporary workaround.
            xdat, ydat = bound * 1000 * locDat[0], bound * 1000 * locDat[1]

            t = round(locDat[2], 2)
            name, date, btime, dtstr = stringed(file)
            atime = (datetime.strptime(btime, '%m/%d/%Y %H:%M:%S') +
                     timedelta(seconds=t))

            x0p, y0p, x1p, y1p = float(pred[1][n][0]), float(
                pred[1][n][1]), float(pred[1][n][2]), float(pred[1][n][3])
            xp, yp = (x1p + x0p) / 2, (y1p + y0p) / 2

            xdm = [np.amin(xdat), np.amax(xdat)]
            ydm = [np.amin(ydat), np.amax(ydat)]
            Xv = np.linspace(xdm[0], xdm[1], 2500)
            Yv = np.linspace(ydm[1], ydm[0], 2500)

            x, y = Xv[int(xp)], Yv[int(yp)]
            x0, y0 = Xv[int(x0p)], Yv[int(y0p)]
            x1, y1 = Xv[int(x1p)], Yv[int(y1p)]

            if vis:  # Saves the image with a bounding box, detection type, and confidence level
                fig = plt.figure(figsize=(25, 25))
                ax = fig.add_axes([0, 0, 1, 1])
                ax.imshow(img)
                w, h = abs(x0p - x1p), abs(y0p - y1p)
                rect = patches.Rectangle((x0p, y0p),
                                         w,
                                         h,
                                         linewidth=1,
                                         edgecolor='r',
                                         facecolor='none')
                ax.add_patch(rect)
                detstr = pred[0][n] + ': ' + str(round(float(pred[2][n]), 2))
                plt.text(x0p + w / 2,
                         y0p - 5,
                         detstr,
                         fontsize=8,
                         color='red',
                         ha='center')
                imname = detdir + file + '_' + sweep + '_detected' + '.png'
                plt.savefig(imname, bbox_inches='tight')

            # Finding Geodetic coordinates from relative distance to site:
            z = np.sqrt(x**2 + y**2) * np.tan(np.radians(float(sweep)))
            sitealt, sitelon, sitelat = float(radar.altitude['data']), float(
                radar.longitude['data']), float(radar.latitude['data'])
            lon, lat = np.around(
                pyart.core.cartesian_to_geographic_aeqd(
                    x, y, sitelon, sitelat), 2)
            lon0, lat0 = np.around(
                pyart.core.cartesian_to_geographic_aeqd(
                    x0, y0, sitelon, sitelat), 3)
            lon1, lat1 = np.around(
                pyart.core.cartesian_to_geographic_aeqd(
                    x1, y1, sitelon, sitelat), 3)
            lon, lon0, lon1 = -lon, -lon0, -lon1
            alt = round(z + sitealt, 2)

            # Return on the first detection above the confidence threshold
            return [lat, lon, alt, atime], [lat0, lon0, lat1, lon1, alt, atime]
Code Example #5
import matplotlib.pyplot as plt
from torchvision import transforms
from detecto.utils import normalize_transform
from detecto.core import Dataset, DataLoader, Model

IMAGE_DIR = '/Users/noahmushkin/codes/selenium-python-scraping/data/images/cameras/'
LABEL_DIR = '/Users/noahmushkin/codes/selenium-python-scraping/data/labeled_cams_convert/'

img_transform = transforms.Compose([
    transforms.ToPILImage(),
    # Resize(400) scales the smaller edge of each image to 400 pixels; the
    # underlying Faster R-CNN internally scales images up to a minimum side
    # of 800 anyway
    transforms.Resize(400),
    transforms.RandomHorizontalFlip(0.5),
    transforms.ColorJitter(saturation=0.2),
    transforms.ToTensor(),  # required
    normalize_transform(),  # required
])

dataset = Dataset(LABEL_DIR, IMAGE_DIR, transform=img_transform)
labels = ['camera']
model = Model(classes=labels)
loader = DataLoader(dataset, batch_size=32, shuffle=True)
# Note: detecto's fit() only returns per-epoch losses when a validation
# dataset is supplied; with just a DataLoader it returns None.
losses = model.fit(loader, epochs=10, learning_rate=0.005)
if losses is not None:
    plt.plot(losses)
    plt.show()
model.save('cam_model.pth')
Code Example #6
from detecto.core import Model
from detecto.visualize import detect_live

# With no class list, Model() falls back to the underlying pre-trained detector
model = Model()

detect_live(model, score_filter=0.8)
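
detect_live needs a webcam; for a recorded clip, detecto.visualize also provides a file-based equivalent, detect_video (the file names below are illustrative):

from detecto.visualize import detect_video

# Runs the model over every frame and writes an annotated copy of the video
detect_video(model, 'input.mp4', 'output.mp4', score_filter=0.8)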
Code Example #7
File: helpers.py  Project: wutijat/detecto
def get_model():
    return Model(['test1', 'test2', 'test3'])
Code Example #8
def __init__(self):
    self._model = Model()
Code Example #9
class Detector(object):
    def __init__(self):
        self._model = Model()

    def predict(self, img):
        return self._model.predict(img)
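
A brief usage sketch for the wrapper above (the image path is illustrative):

from detecto.utils import read_image

detector = Detector()
labels, boxes, scores = detector.predict(read_image('sample.jpg'))
print(labels, boxes, scores)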
Code Example #10
# train.py

# Import detecto libs, the lib is great and does all the work
# https://github.com/alankbi/detecto
from detecto import core
from detecto.core import Model

# Load all images and XML files from the Classification section
# or point it at "images" if you classified your own
dataset = core.Dataset('images_classified/')

# We initialize the Model and map it to the label we used in labelImg classification
model = Model(['aboriginal_flag'])

# The model.fit() method is the bulk of this program.
# It trains your model synchronously (the lib doesn't expose many logs)
# and uses quite a lot of resources; if it crashes on your computer,
# you may have to rent a bigger box for a few hours to run it.
# Epochs are essentially full training passes: the more the merrier (for accuracy), up to a limit.
# The process takes a while (15 mins?), so grab a wine.
# If you see "process killed", training is memory bound; try increasing your swap space.
model.fit(dataset, epochs=3, verbose=True)

# TIP: The more images you classify and the more epochs you run, the better your results will be.

# Once the model training has finished, we can save to a single file.
# Pass this file around to anywhere you want to now use your newly trained model.
model.save('model.pth')

# If you have got this far, you've already trained your very own unique machine learning model
# What are you going to do with this new found power?
Code Example #11
from detecto.core import Model
from detecto import utils, visualize
from detecto.core import Dataset

# A single path means the images and their XML label files live in the same folder
dataset = Dataset('/Users/mahesh/mltrain/')

from detecto.visualize import show_labeled_image

image, targets = dataset[0]
#show_labeled_image(image, targets['boxes'], targets['labels'])

your_labels = ['malu']
model = Model(your_labels)

model.fit(dataset, verbose=True)
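
The example stops right after training; a short follow-up sketch that saves the model and runs a single prediction, using the utils and visualize modules already imported above (the sample path is illustrative):

model.save('malu_model.pth')

image = utils.read_image('/Users/mahesh/mltrain/sample.jpg')  # illustrative file
labels, boxes, scores = model.predict(image)
visualize.show_labeled_image(image, boxes, labels)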
Code Example #12
File: torchtrain.py  Project: bgmiller100/rasr
"""
Training script for a PyTorch-based convolutional neural network
for object detection in radar data.

@author: Yash Sarda
"""

from detecto.core import Model, Dataset
import matplotlib
import matplotlib.pyplot as plt

########################################################

tdataset = Dataset('training/2500/train/')  # Training dataset
vdataset = Dataset('training/2500/test/')  # Validation dataset

model = Model(['fall'])

# Keep the learning rate low; a high rate can make the loss diverge
loss = model.fit(tdataset,
                 vdataset,
                 epochs=15,
                 learning_rate=0.001,
                 gamma=0.2,
                 lr_step_size=5,
                 verbose=True)

plt.plot(loss)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
model.save('RASRmodl.pth')