Example #1
0
def main():
    """Train the SSD network and periodically checkpoint it.

    Builds the model from pre-trained VGG weights, runs the training loop
    for ``--epochs`` epochs, logs per-epoch training/validation loss to
    TensorBoard, and saves a checkpoint every ``--checkpoint-interval``
    epochs plus a final one.

    Returns:
        int: 0 on success (shell-style exit code).
    """
    parser = argparse.ArgumentParser(description='Train Network')
    parser.add_argument('--data-dir', default='data', help='data directory')
    # BUGFIX: the options below were read from `args` later in this function
    # but were never declared, so access raised AttributeError at runtime.
    parser.add_argument('--vgg-dir', default='vgg', help='VGG checkpoint directory')
    parser.add_argument('--name', default='model', help='checkpoint directory name')
    parser.add_argument('--tensorboard-dir', default='tb', help='TensorBoard log directory')
    parser.add_argument('--batch-size', type=int, default=32, help='batch size')
    parser.add_argument('--epochs', type=int, default=100, help='number of training epochs')
    parser.add_argument('--checkpoint-interval', type=int, default=10,
                        help='save a checkpoint every N epochs')
    args = parser.parse_args()

    td = TrainingData(args.data_dir)

    with tf.Session() as sess:
        net = SSD(sess)
        net.create_from_vgg(args.vgg_dir, td.num_classes, td.conf)

        # Per-anchor target tensor; presumably num_classes scores + 5 box
        # values per anchor — TODO confirm against TrainingData's encoding.
        labels = tf.placeholder(tf.float32, shape=[None, None, td.num_classes+5])
        optimizer, loss = net.get_optimizer(labels)
        summary_writer = tf.summary.FileWriter(args.tensorboard_dir, sess.graph)
        saver = tf.train.Saver(max_to_keep=10)
        n_batches = int(math.ceil(td.num_train/args.batch_size))
        init_vars(sess)

        # Scalar placeholders so the per-epoch averages (plain Python floats)
        # can be written out as TensorBoard summaries.
        validation_loss = tf.placeholder(tf.float32)
        validation_loss_summary_op = tf.summary.scalar('validation_loss', validation_loss)
        training_loss = tf.placeholder(tf.float32)
        training_loss_summary_op = tf.summary.scalar('training_loss', training_loss)

        for e in range(args.epochs):
            generator = td.train_generator(args.batch_size)
            description = 'Epoch {}/{}'.format(e+1, args.epochs)
            training_loss_total = 0
            # BUGFIX: was `n_train_batches`, which is undefined; the batch
            # count computed above is named `n_batches`.
            for x, y in tqdm(generator, total=n_batches, desc=description, unit='batches'):
                feed = {net.image_input: x,
                       labels: y, net.keep_prob: 1}
                loss_batch, _ = sess.run([loss, optimizer], feed_dict=feed)
                # Weight by batch size so the final division yields a
                # per-sample average even for a short last batch.
                training_loss_total += loss_batch * x.shape[0]
            training_loss_total /= td.num_train

            # BUGFIX: was `tf.valid_generator`; the generator lives on the
            # training-data object `td`, not on TensorFlow.
            generator = td.valid_generator(args.batch_size)
            validation_loss_total = 0
            for x, y in generator:
                feed = {net.image_input: x,
                       labels: y, net.keep_prob: 1}
                # BUGFIX: only `loss` is fetched during validation (no
                # optimizer step), so sess.run returns a single value; the
                # old `loss_batch, _ = sess.run([loss], ...)` raised
                # ValueError when unpacking.
                loss_batch = sess.run(loss, feed_dict=feed)
                validation_loss_total += loss_batch * x.shape[0]
            validation_loss_total /= td.num_valid

            feed = {validation_loss: validation_loss_total,
                    training_loss:   training_loss_total}
            loss_summary = sess.run([validation_loss_summary_op,
                                     training_loss_summary_op],
                                    feed_dict=feed)
            summary_writer.add_summary(loss_summary[0], e)
            summary_writer.add_summary(loss_summary[1], e)

            if (e+1) % args.checkpoint_interval == 0:
                checkpoint = '{}/e{}.ckpt'.format(args.name, e+1)
                saver.save(sess, checkpoint)

        checkpoint = '{}/final.ckpt'.format(args.name)
        saver.save(sess, checkpoint)

    return 0
Example #2
0
def detect_img(indir,outdir):
    """Run SSD detection on every image matched by *indir* and save results.

    Args:
        indir: glob pattern selecting the input image files.
        outdir: directory the annotated images are written into, keeping
            each input file's base name.
    """
    detector = SSD()
    for path in glob.glob(indir):
        print("Start, the detect image is:", path)
        annotated = detector.detect_image(Image.open(path))
        annotated.save(os.path.join(outdir, os.path.basename(path)))
        print("End, the detection of this image")
        print('---------------------------------')
class Solver:
    """Loads a trained chainer SSD model and reads character strings from
    single-frame image/GIF files."""

    def __init__(self, dirname=DEFAULT_MODEL_DIR, gpu=-1,
            nms_thresh=DEFAULT_NMS_THRESH, score_thresh=DEFAULT_SCORE_THRESH):
        """Build the SSD model from the metadata and weights in *dirname*.

        Args:
            dirname: directory containing ``model.json`` and the ``.npz``
                weights file it references.
            gpu: GPU device id; a negative value keeps the model on the CPU.
            nms_thresh: non-maximum-suppression threshold passed to SSD.
            score_thresh: minimum detection score passed to SSD.
        """
        with open(os.path.join(dirname, "model.json"), 'r') as fp:
            metadata = json.load(fp)

        n_class = metadata['n_class']
        n_channel = metadata['n_channel']
        npz_file = metadata['file']
        self.class_labels = metadata['class_labels']

        self.model = SSD(n_class=n_class, n_channel=n_channel,
            nms_thresh=nms_thresh, score_thresh=score_thresh,
            grids=DEFAULT_GRIDS, aspect_ratios=DEFAULT_ASPECT_RATIOS,
            variance=DEFAULT_VARIANCE)
        chainer.serializers.load_npz(os.path.join(dirname, npz_file), self.model)

        if gpu >= 0:
            chainer.backends.cuda.get_device_from_id(gpu).use()
            self.model.to_gpu(gpu)

    @property
    def xp(self):
        # Array module (numpy or cupy) matching where the model's arrays live.
        return self.model.xp

    def solve(self, filename):
        """Detect characters in the first frame of *filename* and read them.

        Returns:
            tuple: ``(text, bbox, score)`` — the decoded string plus the
            integer-rounded boxes and scores, all ordered by box column 1
            (presumably x_min in chainercv's box convention, i.e.
            left-to-right — TODO confirm).
        """
        xp = self.xp
        gif = cv2.VideoCapture(filename)
        _, color_image = gif.read(0)
        gray_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
        h, w = gray_image.shape[:2]
        # Scale to [0, 1] and add batch/channel axes: shape (1, 1, H, W).
        img = xp.array(gray_image / 255.0, dtype=xp.float32).reshape(1, 1, h, w)

        output = self.model.predict(img)
        bbox, label, score = output[0][0], output[1][0], output[2][0]
        # Move results to the CPU (-1) so numpy can index them below.
        bbox = chainer.dataset.to_device(-1, bbox)
        label = chainer.dataset.to_device(-1, label)
        score = chainer.dataset.to_device(-1, score)

        # Keep only the NCHARS highest-scoring detections.
        if len(label) > NCHARS:
            indices = np.argsort(score)[-1:-NCHARS-1:-1]
            bbox = bbox[indices]
            label = label[indices]
            score = score[indices]
        # Round box coordinates to the nearest integer pixel.
        bbox = np.vectorize(lambda v: int(v + 0.5), otypes=[int])(bbox)

        indices = np.argsort(bbox[:, 1])
        text = ''.join([ self.class_labels[label[i]] for i in indices ])

        return text, bbox[indices], score[indices]
Example #4
0
    def __init__(self):
        """Build the main window: hardware backends plus a three-page notebook."""
        Gtk.Window.__init__(self)

        # Hardware data sources queried by the notebook pages.
        self.host = Host()
        self.cpu = CPU()
        self.gpu_integrated = GPU_Integrated()
        self.gpu_discrete = GPU_Discrete()
        self.ssd = SSD()
        self.bat = Battery()

        self.notebook = Gtk.Notebook()
        self.add(self.notebook)

        # Page 1: live sensor monitor.
        self.page_1 = Gtk.Box()
        self.__fill_page_1()
        self.notebook.append_page(self.page_1, Gtk.Label('Hardware Monitor'))


        # Page 2: static hardware inventory.
        self.page_2 = Gtk.Box()
        self.__fill_page_2()
        self.notebook.append_page(self.page_2, Gtk.Label('Hardware Info'))


        # Page 3: about text.
        self.page_3 = Gtk.Box()
        self.__fill_page_3()
        self.notebook.append_page(self.page_3, Gtk.Label('About'))
    def __init__(self, dirname=DEFAULT_MODEL_DIR, gpu=-1,
            nms_thresh=DEFAULT_NMS_THRESH, score_thresh=DEFAULT_SCORE_THRESH):
        """Load model metadata and weights from *dirname*, optionally onto a GPU.

        Args:
            dirname: directory holding ``model.json`` and the referenced
                ``.npz`` weights file.
            gpu: GPU device id; negative keeps the model on the CPU.
            nms_thresh: non-maximum-suppression threshold for the SSD model.
            score_thresh: minimum detection score for the SSD model.
        """
        meta_path = os.path.join(dirname, "model.json")
        with open(meta_path, 'r') as meta_file:
            meta = json.load(meta_file)

        n_class = meta['n_class']
        n_channel = meta['n_channel']
        weights_name = meta['file']
        self.class_labels = meta['class_labels']

        self.model = SSD(n_class=n_class, n_channel=n_channel,
                         nms_thresh=nms_thresh, score_thresh=score_thresh,
                         grids=DEFAULT_GRIDS, aspect_ratios=DEFAULT_ASPECT_RATIOS,
                         variance=DEFAULT_VARIANCE)
        chainer.serializers.load_npz(os.path.join(dirname, weights_name), self.model)

        if gpu >= 0:
            chainer.backends.cuda.get_device_from_id(gpu).use()
            self.model.to_gpu(gpu)
    def __init__(self):
        """Set up the ssd_keras ROS node: model, weights, topics and service."""
        self.node_name = "ssd_keras"
        rospy.init_node(self.node_name)
        # PASCAL VOC label set: 20 object classes plus "background".
        self.class_names = [
            "background", "aeroplane", "bicycle", "bird", "boat", "bottle",
            "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant", "sheep", "sofa", "train",
            "tvmonitor"
        ]
        self.num_classes = len(self.class_names)
        self.input_shape = (300, 300, 3)
        self.model = SSD(self.input_shape, num_classes=self.num_classes)
        self.model.load_weights(pkg_path +
                                '/resources/ssd_keras/weights_SSD300.hdf5')

        # Decodes raw network output into bounding boxes.
        self.bbox_util = BBoxUtility(self.num_classes)
        # Minimum confidence for a detection to be used.
        self.conf_thresh = 0.25

        # Pre-build the predict function and capture the TF graph so the
        # model can be called from ROS callback threads.
        self.model._make_predict_function()
        self.graph = tf.get_default_graph()

        self.detection_index = DL_msgs_boxes()

        # Create unique and somewhat visually distinguishable bright
        # colors for the different classes.
        self.class_colors = []
        for i in range(0, self.num_classes):
            # This can probably be written in a more elegant manner
            hue = 255 * i / self.num_classes
            col = np.zeros((1, 1, 3)).astype("uint8")
            col[0][0][0] = hue
            col[0][0][1] = 128  # Saturation
            col[0][0][2] = 255  # Value
            cvcol = cv2.cvtColor(col, cv2.COLOR_HSV2BGR)
            col = (int(cvcol[0][0][0]), int(cvcol[0][0][1]),
                   int(cvcol[0][0][2]))
            self.class_colors.append(col)

        self.bridge = CvBridge()  # Create the cv_bridge object

        # Placeholder image shown until the first camera frame arrives.
        self.Image_Status = "Not_Ready"
        self.StartImage = cv2.imread(pkg_path + '/resources/start.jpg')
        self.to_draw = cv2.resize(self.StartImage, (640, 480))

        self.image_sub = rospy.Subscriber(
            "/floating_sensor/camera/rgb/image_raw",
            Image,
            self.detect_image,
            queue_size=1)  # the appropriate callbacks

        self.box_coordinate_pub = rospy.Publisher(
            "/ssd_detction/box", DL_msgs_boxes,
            queue_size=5)  # the appropriate callbacks
        self.SSD_Serv = rospy.Service('SSD_Detection', DL_box,
                                      self.SSD_Detection_Server)
Example #7
0
def build():
    """Construct the SSD training graph from fluid data layers.

    Declares the image input plus LoD ground-truth box/label inputs and
    feeds them through a fresh SSD model.

    Returns:
        The model's output for ``(image, gt_box, gt_label)``.
    """
    net = SSD()

    img = fluid.layers.data(
        name='image', shape=[3, 300, 300], dtype='float32')
    boxes = fluid.layers.data(
        name='gt_box', shape=[4], dtype='float32', lod_level=1)
    categories = fluid.layers.data(
        name='gt_label', shape=[1], dtype='int32', lod_level=1)

    return net(img, boxes, categories)
Example #8
0
def main():
    """Train SSD on the shelf-image dataset, resuming from a checkpoint when one exists."""
    try:
        # Resume path: the checkpoint stores the whole model/optimizer objects.
        checkpoint = torch.load(config.PATH_TO_CHECKPOINT, map_location=torch.device('cpu'))
        start_epoch = checkpoint['epoch'] + 1
        print('\nLoaded checkpoint from epoch %d.\n' % start_epoch)
        model = checkpoint['model']
        optimizer = checkpoint['optimizer']
    except FileNotFoundError:
        # Fresh start: build the model and a bias-boosted SGD optimizer.
        print('PATH_TO_CHECKPOINT not specified in SSDConfig.\nMaking new model and optimizer.')
        start_epoch = 0
        model = SSD(config)
        params = utils.get_model_params(model)
        # Biases train at twice the base learning rate.
        optimizer = SGD(params=[{'params': params['biases'], 'lr': 2 * config.LEARNING_RATE},
                                {'params': params['not_biases']}],
                        lr=config.LEARNING_RATE,
                        momentum=config.MOMENTUM,
                        weight_decay=config.WEIGHT_DECAY)

    # Data pipeline.
    annotations = get_dataframe(config.PATH_TO_ANNOTATIONS)
    train_set = ShelfImageDataset(annotations, config.PATH_TO_IMAGES, train=True)
    loader = DataLoader(train_set,
                        shuffle=True,
                        collate_fn=collate_fn,
                        batch_size=config.TRAIN_BATCH_SIZE,
                        num_workers=config.NUM_DATALOADER_WORKERS)

    # Move model and loss to the target device.
    model.to(device)
    criterion = MultiBoxLoss(model.priors_cxcy, config).to(device)

    # Convert the iteration budget into whole epochs.
    total_epochs = config.NUM_ITERATIONS_TRAIN // len(loader)
    # Epochs at which the learning rate is decayed.
    decay_epochs = [int(total_epochs * frac) for frac in config.DECAY_LR_AT]

    for epoch in range(start_epoch, total_epochs):
        if epoch in decay_epochs:
            utils.adjust_learning_rate(optimizer, config.DECAY_FRAC)
        train(loader, model, criterion, optimizer, epoch)
        utils.save_checkpoint(epoch, model, optimizer, config, config.PATH_TO_CHECKPOINT)
Example #9
0
    def __init__(self,
                 modelfile,
                 shape=(300, 300, 3),
                 num_classes=21,
                 conf_thresh=0.6):
        """Set up an SSD detector from saved Keras weights.

        Args:
            modelfile: path to the HDF5 weights file to restore.
            shape: model input shape as (height, width, channels).
            num_classes: number of detection classes.
            conf_thresh: minimum confidence for a detection.
        """
        self.input_shape = shape
        self.num_classes = num_classes
        self.conf_thresh = conf_thresh

        # Build the network and restore its trained weights.
        self.model = SSD(shape, num_classes=num_classes)
        self.model.load_weights(modelfile)

        # Utility that decodes raw network output into bounding boxes.
        self.bbox_util = BBoxUtility(self.num_classes)
Example #10
0
def initialize_net() -> "SSD":
    """Create, cache and return the SSD inference network.

    Uses the module-level ``ssd_net`` as a cache: the first call builds the
    network from ``Parameters``, loads the trained weights from the training
    result directory, moves the model to the available device and switches it
    to eval mode; subsequent calls return the cached instance.

    Returns:
        The ready-to-use SSD model.
    """
    # BUGFIX: the return annotation was `-> None` although the function
    # returns the (possibly cached) network on every path.
    global ssd_net

    # if already defined, return it
    if ssd_net is not None:
        print('use cached ssd_net')
        return ssd_net

    # get device ( cpu / gpu ) to be used
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    print(f'device : {device}')

    ssd_cfg = {
        'num_classes':
        num_classes,  # number of classes including background class
        'input_size': Parameters.IMG_SIZE,
        'bbox_aspect_num': Parameters.BBOX_ASPECT_NUM,
        'feature_maps': Parameters.FEATURE_MAPS,
        'steps': Parameters.STEPS,
        'min_sizes': Parameters.MIN_SIZES,
        'max_sizes': Parameters.MAX_SIZES,
        'aspect_ratios': Parameters.ASPECT_RATIOS,
        'conf_thresh': Parameters.CONF_THRESHOLD,
        'top_k': Parameters.TOP_K,
        'nms_thresh': Parameters.NMS_THRESHOLD
    }
    print(f'initializing ssd with : {ssd_cfg}')
    ssd_net = SSD(phase="inference", cfg=ssd_cfg)

    # load weight created in training
    weight_file_path = os.path.join(Parameters.ABEJA_TRAINING_RESULT_DIR,
                                    'model.pth')
    print(f'weight_file_path : {weight_file_path}')
    # cf. https://pytorch.org/tutorials/beginner/saving_loading_models.html#save-on-gpu-load-on-gpu
    weight = torch.load(weight_file_path, map_location=device)
    ssd_net.load_state_dict(weight)

    ssd_net = ssd_net.to(device)
    ssd_net.eval()
    return ssd_net
Example #11
0
def load_model():
    """Load the classification and SSD detection models onto the CPU.

    Results are published through the module-level globals ``class_model``,
    ``object_model`` and ``file_num`` rather than returned; both models are
    put into eval mode.
    """
    global file_num
    # Counter presumably used by callers to number saved files — TODO confirm.
    file_num = 0
    global class_model
    class_model = feature_extractor()
    model_path = './class_model.pki'
    # Remap any CUDA-saved tensors onto the CPU.
    tmp = torch.load(model_path, map_location={'cuda:0': 'cpu'})
    class_model.load_state_dict(tmp)
    class_model.eval()
    # Free the temporary state dict immediately.
    del tmp

    global object_model
    object_model = SSD(depth=50, width=1)
    model_path = './ssd_patch.pki'
    tmp = torch.load(model_path, map_location={'cuda:0': 'cpu'})
    object_model.load_state_dict(tmp)
    object_model.eval()
        cv2.imshow("SSD result", orig_image)
        if cv2.waitKey(5) & 0xFF == ord('s'):
            if len(image_stack) == frame_number:
                if not os.path.exists(save_path + str(sample_count + 1)):
                    os.mkdir(save_path + str(sample_count + 1))
                for pic in range(frame_number):
                    cv2.imwrite(
                        save_path + str(sample_count + 1) + '/' +
                        str(1000 + pic) + '.jpg', image_stack[pic])
                    print('saving ' + save_path + str(sample_count + 1) + '/' +
                          str(1000 + pic) + '.jpg')
                image_stack = []
                empty_count = 0
                sample_count += 1


if __name__ == '__main__':
    # Output layout: images/<action_class>/<sample>/<frame>.jpg
    action_class = 'stand/'
    root_path = 'images/'
    save_path = root_path + action_class
    if not os.path.exists(root_path):
        os.mkdir(root_path)
    if not os.path.exists(save_path):
        os.mkdir(save_path)

    # Number of consecutive frames to record per saved sample.
    save_frames = 16
    input_shape = (300, 300, 3)
    # SSD300 over the 21 PASCAL VOC classes (20 objects + background).
    ssd_model = SSD(input_shape, num_classes=21)
    ssd_model.load_weights('weights_SSD300.hdf5')
    run_camera(input_shape, ssd_model, save_path, save_frames)
Example #13
0

def normalize(I):
    """Rescale a gradient map for display.

    Standardizes *I* to zero mean and unit standard deviation, shrinks it
    so the values cluster near zero (std 0.1), recenters at 0.5 so most
    values are positive, and clips everything into [0, 1].
    """
    standardized = (I - I.mean()) / I.std()
    return (standardized * 0.1 + 0.5).clip(0, 1)


if __name__ == '__main__':
    ssd = SSD()
    # Build the training-phase SSD graph with 3 classes and load weights.
    model = get_ssd("train", 3)  # ssd.net
    model.load_state_dict(
        torch.load(
            "F:/Iris_SSD_small/ssd-pytorch-master/logs/Epoch50-Loc0.0260-Conf0.1510.pth",
            map_location=torch.device('cuda')))
    # Swap the freshly-loaded network (in eval mode) into the SSD wrapper
    # and wrap it for (multi-)GPU execution.
    ssd.net = model.eval()
    ssd.net = torch.nn.DataParallel(ssd.net)
    # ssd.net = ssd.net.cpu()  # ****

    # criterion = MultiBoxLoss(3, 0.5, True, 0, True, 3, 0.5,False, True)
    # Take the underlying module back out of the DataParallel wrapper.
    model = ssd.net.module
    imgPath = '1.bmp'
    image = Image.open(imgPath)
    image.show()
    image_size = image.size
Example #14
0
# List of validation images; `image_dir` is defined earlier in the file —
# TODO confirm it points at the validation image directory.
image_files = sorted(os.listdir(image_dir))

# Load the validation annotations (one JSON file per image).
annotations_dir = os.path.join('competition_data', 'val_annotations')
annotations_files = sorted(os.listdir(annotations_dir))

annotation = []
for file in annotations_files:
    with open(os.path.join(annotations_dir,file)) as f:
        data = json.load(f)
        annotation.append(data)

# Import SSD and restore the trained ssd7 model.
from ssd import SSD

ssd = SSD("ssd_7")
models_dir = os.path.join(".", "trained_models")
model_path = os.path.join(models_dir, "ssd7.h5")
ssd.load_weights(model_path)

def plot_bbox(img, gt, out):
    # グラフサイズの指定
    plt.figure(figsize=(11,11))

    # 衛星データを表示(BGR->RGB)
    plt.imshow(img[:,:,::-1])

    # 今操作したいaxis(画像)を選択
    current_axis = plt.gca()

    # 正解となるbboxを可視化(赤色で表示)
Example #15
0
def create_mobilenetv2_ssd_lite(num_classes,
                                width_mult=1.0,
                                use_batch_norm=True,
                                onnx_compatible=False,
                                is_test=False):
    """Assemble an SSD-Lite detector on a MobileNetV2 backbone.

    Args:
        num_classes: number of detection classes (each prior predicts
            ``num_classes`` scores and 4 box offsets; 6 priors per location).
        width_mult: MobileNetV2 width multiplier; also scales the first
            head's input channels (``round(576 * width_mult)``).
        use_batch_norm: forwarded to the MobileNetV2 backbone.
        onnx_compatible: forwarded to the MobileNetV2 backbone only.
            NOTE(review): the SeperableConv2d heads below hard-code
            ``onnx_compatible=False`` instead of forwarding this flag —
            confirm whether that is intentional.
        is_test: forwarded to SSD (inference-mode post-processing).

    Returns:
        The assembled SSD model.
    """
    base_net = MobileNetV2(width_mult=width_mult,
                           use_batch_norm=use_batch_norm,
                           onnx_compatible=onnx_compatible).features

    # Feature taps: an intermediate conv inside block 14, and the final layer.
    source_layer_indexes = [
        GraphPath(14, 'conv', 3),
        19,
    ]
    # Extra feature maps appended after the backbone, each halving resolution.
    extras = ModuleList([
        InvertedResidual(1280, 512, stride=2, expand_ratio=0.2),
        InvertedResidual(512, 256, stride=2, expand_ratio=0.25),
        InvertedResidual(256, 256, stride=2, expand_ratio=0.5),
        InvertedResidual(256, 64, stride=2, expand_ratio=0.25)
    ])

    # Box-regression heads: 6 priors x 4 offsets per spatial location.
    regression_headers = ModuleList([
        SeperableConv2d(in_channels=round(576 * width_mult),
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=1280,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * 4,
                        kernel_size=3,
                        padding=1,
                        onnx_compatible=False),
        Conv2d(in_channels=64, out_channels=6 * 4, kernel_size=1),
    ])

    # Classification heads: 6 priors x num_classes scores per location.
    classification_headers = ModuleList([
        SeperableConv2d(in_channels=round(576 * width_mult),
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=1280,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=512,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        SeperableConv2d(in_channels=256,
                        out_channels=6 * num_classes,
                        kernel_size=3,
                        padding=1),
        Conv2d(in_channels=64, out_channels=6 * num_classes, kernel_size=1),
    ])

    return SSD(num_classes,
               base_net,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config)
Example #16
0
class App(Gtk.Window):
    """Main window: a three-page notebook with a live hardware monitor,
    a static hardware inventory parsed from lshw/decode-dimms dumps, and
    an about page."""

    def __init__(self):
        """Build the hardware backends, the notebook, and all three pages."""
        Gtk.Window.__init__(self)

        # Hardware data sources polled by the monitor page.
        self.host = Host()
        self.cpu = CPU()
        self.gpu_integrated = GPU_Integrated()
        self.gpu_discrete = GPU_Discrete()
        self.ssd = SSD()
        self.bat = Battery()

        self.notebook = Gtk.Notebook()
        self.add(self.notebook)

        self.page_1 = Gtk.Box()
        self.__fill_page_1()
        self.notebook.append_page(self.page_1, Gtk.Label('Hardware Monitor'))


        self.page_2 = Gtk.Box()
        self.__fill_page_2()
        self.notebook.append_page(self.page_2, Gtk.Label('Hardware Info'))


        self.page_3 = Gtk.Box()
        self.__fill_page_3()
        self.notebook.append_page(self.page_3, Gtk.Label('About'))

#---------------------------------------------- PAGE 1 -------------------------------------------------------------

    def __fill_page_1(self):
        """Build the live-monitor tree and start the background updater threads."""
        # Columns: sensor name, current value, min, max.
        self.store_1 = Gtk.TreeStore(str, str, str, str) 
        treeview = Gtk.TreeView(self.store_1)
        treeview.set_enable_tree_lines(True)
        treeview.modify_font(Pango.FontDescription('monaco 10'))
        renderer = Gtk.CellRendererText()

        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        scrolled_window.add(treeview)
        self.page_1.pack_start(scrolled_window, True, True, 0)

        columns = ('Sensor', 'Value', 'Min', 'Max')
        for i in range(len(columns)):
            column = Gtk.TreeViewColumn(columns[i], renderer, text=i)
            treeview.append_column(column)

        self.__add_nodes_to_store_1()
        treeview.expand_all()

        self.__add_threads()
        self.__set_threads_daemons()
        self.__start_threads()


    def __add_nodes_to_store_1(self):
        """Create the sensor tree rows and remember the nodes that get live updates."""
        host_node = self.store_1.append(None, [self.host.get_name()] + [''] * 3 )

        cpu_node = self.store_1.append(host_node, [self.cpu.get_name()] + [''] * 3)

        # One value row per CPU temperature/frequency/usage label.
        cpu_temp_node = self.store_1.append(cpu_node, ['Temperature'] + [''] * 3)
        self.cpu_temp_value_nodes = []
        for t_l in self.cpu.get_temp_labels():
            self.cpu_temp_value_nodes.append(self.store_1.append(cpu_temp_node, [t_l] + [''] * 3))

        cpu_freq_node = self.store_1.append(cpu_node, ['Frequency'] + [''] * 3)
        self.cpu_freq_value_nodes = []
        for f_l in self.cpu.get_freq_labels():
            self.cpu_freq_value_nodes.append(self.store_1.append(cpu_freq_node, [f_l] + [''] * 3))

        cpu_usage_node = self.store_1.append(cpu_node, ['Usage'] + [''] * 3)
        self.cpu_usage_value_nodes = []
        for u_l in self.cpu.get_usage_labels():
            self.cpu_usage_value_nodes.append(self.store_1.append(cpu_usage_node, [u_l] + [''] * 3))

        gpu_interated_node = self.store_1.append(host_node, [self.gpu_integrated.get_name()] + [''] * 3)
        gpu_integrated_freq_node = self.store_1.append(gpu_interated_node, ['Frequency'] + [''] * 3)
        self.gpu_integrated_freq_value_node = self.store_1.append(gpu_integrated_freq_node, \
            [self.gpu_integrated.get_freq_label()] + [''] * 3)

        gpu_discrete_node = self.store_1.append(host_node, [self.gpu_discrete.get_name()] + [''] * 3)
        gpu_discrete_temp_node = self.store_1.append(gpu_discrete_node, ['Temperature'] + [''] * 3)
        self.gpu_discrete_temp_value_node = self.store_1.append(gpu_discrete_temp_node, \
            [self.gpu_discrete.get_temp_label()] + [''] * 3)

        ssd_node = self.store_1.append(host_node, [self.ssd.get_name()] + [''] * 3)

        ssd_temp_node = self.store_1.append(ssd_node, ['Temperature'] + [''] * 3)
        self.ssd_temp_value_node = self.store_1.append(ssd_temp_node, [self.ssd.get_temp_label()] + [''] * 3)

        bat_node = self.store_1.append(host_node, [self.bat.get_name()] + [''] * 3)

        bat_voltage_node = self.store_1.append(bat_node, ['Voltage'] + [''] * 3)
        self.bat_voltage_value_node = self.store_1.append(bat_voltage_node, [self.bat.get_voltage_label()] + [''] * 3)

        bat_charge_node = self.store_1.append(bat_node, ['Charge'] + [''] * 3)
        self.store_1.append(bat_charge_node, [self.bat.get_charge_header_label()] + self.bat.get_charge_header_row())
        self.bat_charge_value_node = self.store_1.append(bat_charge_node, [self.bat.get_charge_label()] + [''] * 3)


    def __add_threads(self):
        """Create one polling thread per sensor-update callback (not started yet)."""
        sensors_update_callbacks = [
                                    self.__cpu_temp_update_callback, 
                                    self.__cpu_freq_update_callback,
                                    self.__cpu_usage_update_callback, 
                                    self.__gpu_integrated_freq_update_callback, 
                                    self.__gpu_discrete_temp_update_callback, 
                                    self.__ssd_temp_update_callback, 
                                    self.__bat_voltage_update_callback, 
                                    self.__bat_charge_update_callback
                                    ]

        self.sensors_threads = []
        for c in sensors_update_callbacks:
            self.sensors_threads.append(threading.Thread(target=self.__thread_callback, args=[c]))

    def __thread_callback(self, update_func):
        # Poll forever; idle_add marshals the UI update onto the GTK main loop.
        while True:
            GObject.idle_add(update_func)
            time.sleep(INTERVAL)
    
    def __set_threads_daemons(self):
        # Daemon threads so they don't keep the process alive after the window closes.
        for t in self.sensors_threads:
            t.daemon = True

    def __start_threads(self):
        for t in self.sensors_threads:
            t.start()



    def __cpu_temp_update_callback(self):
        """Refresh the (value, min, max) columns of every CPU temperature row."""
        cpu_temperature = self.cpu.get_temperature()
        # zip(*...) transposes per-column data into per-row tuples.
        for i, cpu_temp_row in enumerate(zip(*cpu_temperature)):
            self.store_1[self.cpu_temp_value_nodes[i]][1:] = [str(x) + ' °C' for x in cpu_temp_row]


    def __cpu_freq_update_callback(self):
        """Refresh the CPU frequency rows."""
        cpu_frequency = self.cpu.get_frequency()
        for i, cpu_freq_row in enumerate(zip(*cpu_frequency)):
            self.store_1[self.cpu_freq_value_nodes[i]][1:] = [str(x) + ' MHz' for x in cpu_freq_row]

    def __cpu_usage_update_callback(self):
        """Refresh the CPU usage rows."""
        cpu_usage = self.cpu.get_usage()
        for i, cpu_usage_row in enumerate(zip(*cpu_usage)):
            self.store_1[self.cpu_usage_value_nodes[i]][1:] = [str(x) + ' %' for x in cpu_usage_row]


    def __gpu_integrated_freq_update_callback(self):
        """Refresh the integrated-GPU frequency row."""
        gpu_freq_row = self.gpu_integrated.get_frequency()
        self.store_1[self.gpu_integrated_freq_value_node][1:] = [str(x) + ' MHz' for x in gpu_freq_row]

    def __gpu_discrete_temp_update_callback(self):
        """Refresh the discrete-GPU temperature row."""
        gpu_temp_row = self.gpu_discrete.get_temperature()
        self.store_1[self.gpu_discrete_temp_value_node][1:] = [str(x) + ' °C' for x in gpu_temp_row]


    def __ssd_temp_update_callback(self):
        """Refresh the SSD temperature row."""
        ssd_temp_row = self.ssd.get_temperature()
        self.store_1[self.ssd_temp_value_node][1:] = [str(x) + ' °C' for x in ssd_temp_row]


    def __bat_voltage_update_callback(self):
        """Refresh the battery voltage row."""
        bat_voltage_row = self.bat.get_voltage()
        self.store_1[self.bat_voltage_value_node][1:] = [str(x) + ' V' for x in bat_voltage_row]

    def __bat_charge_update_callback(self):
        """Refresh the battery charge row."""
        bat_charge_row = self.bat.get_charge()
        self.store_1[self.bat_charge_value_node][1:] = [str(x) + ' mWh' for x in bat_charge_row]

#---------------------------------------------- PAGE 2 -------------------------------------------------------------

    def __fill_page_2(self):
        """Build the hardware-inventory tree from lshw and decode-dimms dumps."""
        self.store_2 = Gtk.TreeStore(str)
        treeview = Gtk.TreeView(self.store_2)
        treeview.set_enable_tree_lines(True)
        treeview.modify_font(Pango.FontDescription('monaco 10'))
        renderer = Gtk.CellRendererText()

        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        scrolled_window.add(treeview)
        self.page_2.pack_start(scrolled_window, True, True, 0)

        column = Gtk.TreeViewColumn('List Hardware', renderer, text=0)
        treeview.append_column(column)

        self.dimms = self.__read_dimms()

        data = self.__read_lshw()
        self.__parse_json_to_store_2(data)


    def __read_dimms(self):
        """Return the per-DIMM sections of a saved decode-dimms dump."""
        path = 'sysfs/decode_dimms'
        with open(path, 'r') as f:
            s = f.read()
        # Sections are separated by blank lines; keep the two DIMM sections.
        return s.split('\n\n\n')[1:3]


    def __parse_dimm(self, dimm, parent_node, i):
        """Insert one decode-dimms section under *parent_node* in store_2."""
        dimm_blks = dimm.split('\n\n')
        dimm_header = dimm_blks[0].split('\n')
        dimm_header[0], dimm_header[1] = dimm_header[1], dimm_header[0]
        dimm_blks[0] = dimm_header[1]

        # decode-dimms output aligns values at column 48 — the slicing below
        # relies on that fixed layout.
        dimm_name = dimm_header[0][48:-1] + str(i)
        dimm_from_name = dimm_header[1].split(':')[0]
        dimm_from_path = dimm_header[1].split(':')[1].lstrip(' ')

        name_node = self.store_2.append(parent_node, [dimm_name])
        k_node = self.store_2.append(name_node, [dimm_from_name])
        self.store_2.append(k_node, [dimm_from_path])

        for blk in dimm_blks[1:]:
            blk_l = blk.split('\n')
            # Block titles look like "===== Title =====".
            blk_name = blk_l[0].split('===')[1][1:-1]
            blk_name_node = self.store_2.append(name_node, [blk_name])
            for s in blk_l[1:]:
                k = s[0:48].rstrip(' ')
                v = s[48:]
                k_node = self.store_2.append(blk_name_node, [k])
                self.store_2.append(k_node, [v])


    def __read_lshw(self):
        """Return the parsed JSON of a saved `lshw -json` dump."""
        path = 'sysfs/lshw'
        with open(path, 'r') as f:
            data_str = f.read()
        return json.loads(data_str)


    def __parse_json_to_store_2(self, d, parent_node=None):
        """Recursively mirror the lshw JSON tree into store_2.

        Memory banks 0 and 2 are replaced by the matching decode-dimms
        sections; 'children' recurse, selected keys expand into subtrees.
        """
        if type(d) is dict:
            if 'id' in d:
                if re.match('bank:[0-9]', d['id']):
                    n = int(d['id'][-1])
                    if n in (0, 2):
                        # Map bank 0 -> dimm 0, bank 2 -> dimm 1.
                        n >>= 1
                        self.__parse_dimm(self.dimms[n], parent_node, n)
                else:
                    parent_node = self.store_2.append(parent_node, [str(d['id'])])
                    del(d['id'])
                    for k in d:
                        if k not in ('capabilities', 'configuration', 'children'):
                            key_node = self.store_2.append(parent_node, [str(k)])
                            if type(d[k]) is not list:
                                value_node = self.store_2.append(key_node, [str(d[k])])
                            else:
                                for v in d[k]:
                                    value_node = self.store_2.append(key_node, [str(v)])
                        elif k in ('configuration', 'capabilities'):
                            key_node = self.store_2.append(parent_node, [str(k)])
                            value_dict = d[k]
                            for k_dict in value_dict:
                                key_l_node = self.store_2.append(key_node, [str(k_dict)])
                                self.store_2.append(key_l_node, [str(value_dict[k_dict])])

                if 'children' in d:
                    for list_dict in d['children']:
                        self.__parse_json_to_store_2(list_dict, parent_node)

#---------------------------------------------- PAGE 3 -------------------------------------------------------------

    def __fill_page_3(self):
        """Build the static, read-only about page."""
        textview = Gtk.TextView()
        textview.modify_font(Pango.FontDescription('Droid Sans 14'))
        textview.set_editable(False)
        textview.set_cursor_visible(False)
        textview.set_justification(Gtk.Justification.CENTER)
        textbuffer = textview.get_buffer()
        s = '\n\n\n\n\n\n\n\n\nThis program is brought to you by\nArtluix - Daineko Stanislau\nSt. of BSUIR of FKSiS\nof chair of Informatics\n\nBig thanks and credits to devs of:\ndecode-dimms\n lshw\n hdparm\n hddtemp'
        textbuffer.set_text(s)
        self.page_3.pack_start(textview, True, True, 0)
Example #17
0
#-----------------------------------------------------------------------#
#   predict.py integrates single-image prediction, camera detection,
#   FPS testing and directory-traversal detection into one script;
#   switch between them by setting `mode`.
#-----------------------------------------------------------------------#
import time

import cv2
import numpy as np
from PIL import Image

from ssd import SSD

if __name__ == "__main__":
    ssd = SSD()
    #----------------------------------------------------------------------------------------------------------#
    #   mode selects what to run:
    #   'predict'       single-image prediction; see the notes further below to
    #                   customize the step (saving images, cropping objects, ...)
    #   'video'         video detection from a camera or a video file
    #   'fps'           FPS benchmark (uses img/street.jpg)
    #   'dir_predict'   detect every image in a folder (img -> img_out by default)
    #   'export_onnx'   export the model to ONNX (requires pytorch >= 1.7.1)
    #----------------------------------------------------------------------------------------------------------#
    mode = "predict"
    #-------------------------------------------------------------------------#
    #   crop    whether to crop detected objects after single-image prediction
    #   count   whether to count detected objects
    #   crop and count only take effect when mode == 'predict'
    #-------------------------------------------------------------------------#
    crop            = False
    count           = False
    #----------------------------------------------------------------------------------------------------------#
    # NOTE(review): the next two lines are a dangling continuation of a VOC
    # class-name list whose opening (e.g. `classes = ['aeroplane', ...`) was
    # lost when this snippet was extracted -- the fragment is not runnable
    # as-is.
                   'car','cat','chair','cow','diningtable','dog','horse',\
                   'motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']

    # SSD300 configuration
    ssd_cfg = {
            'num_classes': 21, # total number of classes, background included
            'input_size': 300, # input image size
            'bbox_aspect_num': [4, 6, 6, 6, 4, 4], # aspect-ratio variants of DBoxes per source
            'feature_maps': [38, 19, 10, 5, 3, 1], # spatial size of each source feature map
            'steps': [8, 16, 32, 64, 100, 300], # determines DBox sizes
            'min_sizes': [30, 60, 111, 162, 213, 264], # determines DBox sizes
            'max_sizes': [60, 111, 162, 213, 264, 315], # determines DBox sizes
            'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
    }
    # SSD network model, built for inference
    net = SSD(phase="inference", cfg=ssd_cfg)

    #device = torch.device('cpu')

    # Load trained SSD weights.
    # NOTE(review): `torch` is used below but never imported in this fragment;
    # the import was presumably lost in extraction -- confirm against source.
    #net_weights = torch.load('./weights/ssd300_600.pth', map_location=device)
    #net_weights = torch.load('./weights/ssd300_50.pth', map_location={'cuda0': 'cpu'})
    net_weights = torch.load('./weights/ssd300_1.pth', map_location={'cuda0': 'cpu'})

    #net_weights = torch.load('./weights/ssd300_mAP_77.43_v2.pth', map_location={'cuda0': 'cpu'})
    ###net_weights = torch.load('./weights/ssd300_mAP_77.43_v2.pth', map_location=device)

    net.load_state_dict(net_weights)

    print('ネットワーク設定完了:学習済みの重みをロードしました')
Example #19
0
#%%
# Earlier variant that additionally passed a band-stop noise filter
# (filt_params_noise_stop); kept commented out for reference.
# ssd = SSD(filt_params_signal=dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
#                                   l_trans_bandwidth=1, h_trans_bandwidth=1,
#                                   fir_design='firwin'),\
#           filt_params_noise=dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
#                                   l_trans_bandwidth=1, h_trans_bandwidth=1,
#                                   fir_design='firwin'),
#           filt_params_noise_stop=dict(l_freq=freqs_noise2[1], h_freq=freqs_noise2[0],
#                                   l_trans_bandwidth=1, h_trans_bandwidth=1,
#                                   fir_design='firwin'),
#           sampling_freq=sf, picks=picks, rank="full", n_fft=4096)

# Build the SSD estimator with a signal band and a noise band filter.
# NOTE(review): presumably mne.decoding.SSD (spatio-spectral decomposition);
# the import is not visible in this fragment -- confirm.
ssd = SSD(filt_params_signal=dict(l_freq=freqs_sig[0], h_freq=freqs_sig[1],
                                  l_trans_bandwidth=1, h_trans_bandwidth=1,
                                  fir_design='firwin'),\
          filt_params_noise=dict(l_freq=freqs_noise[0], h_freq=freqs_noise[1],
                                  l_trans_bandwidth=1, h_trans_bandwidth=1,
                                  fir_design='firwin'),
          sampling_freq=sf, picks=picks, rank="full", n_fft=4096)
#%%
# Fit the spatial filters on the first 120 seconds of the recording.
ssd.fit(raw.copy().crop(0, 120))
#%%

# Apply the learned filters to the full recording.
ssd_sources = ssd.transform(raw)
#%%
# Welch PSD of the extracted sources (same n_fft=4096 as used above).
psd, freqs = mne.time_frequency.psd_array_welch(ssd_sources,
                                                sfreq=raw.info['sfreq'],
                                                n_fft=4096)
# psd, freqs = mne.time_frequency.psd_array_welch(
#     raw.get_data(), sfreq=raw.info['sfreq'], n_fft=int(np.ceil(raw.info['sfreq']/2)))
#%%
Example #20
0
def create_mobilenetv1_ssd(num_classes, is_test=False):
    """Assemble an SSD detector on top of a MobileNetV1 backbone.

    Layout is identical to the hand-written original: two backbone tap
    points, four extra downsampling stages, and six regression /
    classification head pairs, each predicting 6 default boxes per
    location.
    """
    base_net = MobileNetV1(1001).model  # disable dropout layer

    # Feature maps are tapped after backbone layers 12 and 14.
    source_layer_indexes = [
        12,
        14,
    ]

    def _extra_stage(in_ch, mid_ch, out_ch):
        # 1x1 bottleneck followed by a stride-2 3x3 conv, ReLU after each.
        return Sequential(
            Conv2d(in_channels=in_ch, out_channels=mid_ch, kernel_size=1),
            ReLU(),
            Conv2d(in_channels=mid_ch,
                   out_channels=out_ch,
                   kernel_size=3,
                   stride=2,
                   padding=1),
            ReLU())

    extras = ModuleList([
        _extra_stage(1024, 256, 512),
        _extra_stage(512, 128, 256),
        _extra_stage(256, 128, 256),
        _extra_stage(256, 128, 256),
    ])

    # One head pair per source feature map (2 backbone taps + 4 extras);
    # every head sees these input widths and predicts 6 boxes per cell.
    head_in_channels = [512, 1024, 512, 256, 256, 256]

    regression_headers = ModuleList([
        Conv2d(in_channels=ch, out_channels=6 * 4, kernel_size=3, padding=1)
        for ch in head_in_channels
    ])

    classification_headers = ModuleList([
        Conv2d(in_channels=ch,
               out_channels=6 * num_classes,
               kernel_size=3,
               padding=1)
        for ch in head_in_channels
    ])

    return SSD(num_classes,
               base_net,
               source_layer_indexes,
               extras,
               classification_headers,
               regression_headers,
               is_test=is_test,
               config=config)
# NOTE(review): `parser` (argparse.ArgumentParser) is created above this
# fragment; only the trailing argument definitions are visible here.
parser.add_argument('--weights',
                    default='checkpoints/model_300_VGG16_final_logos.pth.tar',
                    type=str,
                    help='Checkpoint of the model')
parser.add_argument('--cuda',
                    default=True,
                    type=str2bool,
                    help='Enable or not cuda')
parser.add_argument('--test_filenames',
                    default='test_images/*.jpg',
                    type=str,
                    help='Regex of filenames')
args = parser.parse_args()

# NOTE(review): this first SSD instance is discarded -- `net` is rebound by
# SSD.load(...) below, so these constructor arguments have no effect.
net = SSD(cuda=args.cuda,
          architecture='300_VGG16',
          num_classes=len(LogoDataset.CLASSES))
# Load the checkpoint, mapping tensors to CPU when CUDA is unavailable.
has_cuda = args.cuda and torch.cuda.is_available()
if has_cuda:
    weights = torch.load(args.weights)['model']
else:
    weights = torch.load(args.weights, map_location='cpu')['model']
net = SSD.load(weights=weights)

COLORMAP = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
# NOTE(review): the empty-list assignment is immediately overwritten below.
images = []
images = [cv2.imread(filename) for filename in glob.glob(args.test_filenames)]

results = net.predict(images)

# NOTE(review): the body of this loop was lost when the snippet was
# extracted -- the fragment ends mid-statement.
for im, result_image in zip(images, results):
def train(train_config):
    """Train SSD on VOC or COCO according to ``train_config``.

    Builds the dataset and dataloader, constructs the SSD network
    (optionally resuming from a checkpoint and wrapping in DataParallel
    on CUDA), then runs the iteration-driven training loop: forward /
    backward with MultiBoxLoss each step, console logging every 50
    iterations, optional visdom plots, a checkpoint every 5000
    iterations and a final save.

    Args:
        train_config: namespace-like object providing the fields read
            below (dataset_name, basenet, batch_size, cuda, resume,
            visdom, lr, gamma, weight_decay, num_workers, start_iter,
            save_folder, with_fpn).

    Raises:
        ValueError: if ``train_config.dataset_name`` is neither 'VOC'
            nor 'COCO'.
    """
    logger = Logger(HOME+'/log', train_config.basenet)
    if train_config.dataset_name == 'VOC':
        cfg = voc_config
        dataset = VOCDataset(DATA_DIR, transform=SSDAugmentation(
            cfg['min_dim'], MEANS))
    elif train_config.dataset_name == 'COCO':
        cfg = coco_config
        dataset = COCODataset(DATA_DIR, transform=SSDAugmentation(
            cfg['min_dim'], MEANS))
    else:
        # Fail fast with a clear message instead of hitting a NameError
        # on `cfg` below.
        raise ValueError(
            "Unsupported dataset_name: %r (expected 'VOC' or 'COCO')"
            % train_config.dataset_name)

    if train_config.visdom:
        import visdom
        viz = visdom.Visdom()

    ssd_net = SSD('train', train_config.basenet,
                  cfg['min_dim'], cfg['num_classes'], with_fpn=train_config.with_fpn)
    # Keep `ssd_net` as the unwrapped module: state_dict saving/loading
    # below must not go through the DataParallel wrapper.
    net = ssd_net
    if train_config.cuda:
        net = nn.DataParallel(ssd_net)
        cudnn.benchmark = True
    if train_config.resume:
        logger('Loading {} ...'.format(train_config.resume))
        load_weights = torch.load(
            train_config.resume, map_location=lambda storage, loc: storage)
        ssd_net.load_state_dict(load_weights)
    if train_config.cuda:
        net = net.cuda()
    if not train_config.resume:
        logger('Initializing weights ...')
        # Only the sub-networks without pretrained weights are re-initialized.
        ssd_net.topnet.apply(weights_init)
        ssd_net.loc_layers.apply(weights_init)
        ssd_net.conf_layers.apply(weights_init)

    optimizer = optim.Adam(net.parameters(), lr=train_config.lr,
                           weight_decay=train_config.weight_decay)
    criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,
                             False, train_config.cuda)

    net.train()
    # Per-epoch running loss sums (plain Python floats, reset each epoch).
    loc_loss = 0
    conf_loss = 0
    epoch = 0
    logger('Loading the dataset...')

    epoch_size = len(dataset) // train_config.batch_size
    logger('Training SSD on:{}'.format(dataset.name))

    step_index = 0

    if train_config.visdom:
        vis_title = 'SSD.PyTorch on ' + dataset.name
        vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']
        iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)
        epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)

    data_loader = data.DataLoader(dataset, train_config.batch_size,
                                  num_workers=train_config.num_workers,
                                  shuffle=True, collate_fn=detection_collate,
                                  pin_memory=True)
    # create batch iterator
    batch_iterator = iter(data_loader)
    t0 = time.time()
    for iteration in range(train_config.start_iter, cfg['max_iter']):
        if train_config.visdom and iteration != 0 and (iteration % epoch_size == 0):
            # BUG FIX: loc_loss/conf_loss are plain floats (accumulated via
            # loss.item() below); the original called `.item()` on them,
            # which raised AttributeError at the first epoch boundary
            # whenever visdom was enabled.
            update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,
                            'append', epoch_size)
            logger('epoch = {} : loss = {}, loc_loss = {}, conf_loss = {}'.format(
                epoch, loc_loss + conf_loss, loc_loss, conf_loss))
            # reset epoch loss counters
            loc_loss = 0
            conf_loss = 0
            epoch += 1

        # Step the learning-rate schedule at the configured milestones.
        if iteration in cfg['lr_steps']:
            step_index += 1
            adjust_learning_rate(optimizer, train_config.lr,
                                 train_config.gamma, step_index)

        # load train data
        images, targets = next(batch_iterator)

        # Restart the iterator once per epoch so it never runs dry.
        if iteration//epoch_size > 0 and iteration % epoch_size == 0:
            batch_iterator = iter(data_loader)
            print(iteration)

        if train_config.cuda:
            images = images.cuda()
            targets = [ann.cuda() for ann in targets]

        # forward
        out = net(images)
        # backprop
        optimizer.zero_grad()
        loss_l, loss_c = criterion(out, targets)
        loss = loss_l + loss_c
        loss.backward()
        optimizer.step()
        if train_config.visdom:
            loc_loss += loss_l.item()
            conf_loss += loss_c.item()

        if iteration % 50 == 0:
            t1 = time.time()
            logger('timer: %.4f sec. || ' % (t1 - t0)+'iter ' + repr(iteration) +
                   ' || Loss: %.4f ||' % (loss.item()) +
                   ' || loc_loss: %.4f ||' % (loss_l.item()) +
                   ' || conf_loss: %.4f ||' % (loss_c.item()))
            t0 = time.time()

        if train_config.visdom:
            update_vis_plot(iteration, loss_l.item(), loss_c.item(),
                            iter_plot, epoch_plot, 'append')

        if iteration != 0 and iteration % 5000 == 0:
            logger('Saving state, iter:%d' % iteration)
            torch.save(ssd_net.state_dict(), train_config.save_folder +
                       'ssd224_VOC_' + repr(iteration) + '.pth')
    torch.save(ssd_net.state_dict(),
               train_config.save_folder + 'ssd224_VOC.pth')
Example #23
0
image_files = sorted(os.listdir(image_dir))

# Load the validation annotations (one JSON file per image).
annotations_dir = os.path.join('competition_data', 'val_annotations')
annotations_files = sorted(os.listdir(annotations_dir))

def _load_json(path):
    # Read one annotation file and return its parsed content.
    with open(path) as fp:
        return json.load(fp)

annotation = [_load_json(os.path.join(annotations_dir, fname))
              for fname in annotations_files]

# Build the SSD7 model and restore its trained weights.
from ssd import SSD

ssd = SSD("ssd_7")

models_dir = os.path.join(".", "trained_models")
print(os.listdir(models_dir))

model_path = os.path.join(models_dir, "ssd7.h5")
ssd.load_weights(model_path)


##################################################################

# Pick one image together with its ground-truth annotation.
index = 4
image_path = os.path.join(image_dir, image_files[index])
img = cv2.imread(image_path)
gt = annotation[index]
Example #24
0
import keras
import pickle
from videotest import VideoTest

import sys
sys.path.append("..")
from ssd import SSD300 as SSD

# Input resolution (height, width, channels) expected by SSD300.
input_shape = (300, 300, 3)

# Change this if you run with other classes than VOC
# BUG FIX: the original list contained "dog" both here (index 1) and at its
# correct alphabetical position, so detections of class id 1 were mislabeled;
# the first VOC class is "aeroplane".
class_names = [
    "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus",
    "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike",
    "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"
]
NUM_CLASSES = len(class_names)  # 21: 20 VOC classes + background

model = SSD(input_shape, num_classes=NUM_CLASSES)

# Change this path if you want to use your own trained weights
model.load_weights('../weights_SSD300.hdf5')

vid_test = VideoTest(class_names, model, input_shape)

# To test on webcam 0, remove the parameter (or change it to another number
# to test on that webcam)
vid_test.run('path/to/your/video.mkv')
Example #25
0
from keras.layers import Input
from ssd import SSD
from PIL import Image

ssd = SSD()

# Interactive loop: prompt for a filename, run detection, show the result.
while True:
    img = input('Input image filename:')
    try:
        image = Image.open(img)
    # BUG FIX: the original bare `except:` also swallowed KeyboardInterrupt
    # and SystemExit, making Ctrl+C unreliable at the prompt. Image.open
    # signals failure with OSError subclasses (FileNotFoundError for a
    # missing path, UnidentifiedImageError for an unreadable file).
    except OSError:
        print('Open Error! Try again!')
        continue
    else:
        r_image = ssd.detect_image(image)
        r_image.show()
Example #26
0
image_files = sorted(os.listdir(image_dir))

# Load image data and validation annotations (one JSON file per image).
annotations_dir = os.path.join('competition_data', 'val_annotations')
annotations_files = sorted(os.listdir(annotations_dir))

annotation = []
for file in annotations_files:
    with open(os.path.join(annotations_dir,file)) as f:
        data = json.load(f)
        annotation.append(data)

# Import SSD and restore the trained SSD7 weights.
from ssd import SSD

ssd = SSD("ssd_7")
models_dir = os.path.join(".", "trained_models")
model_path = os.path.join(models_dir, "ssd7.h5")
ssd.load_weights(model_path)

def plot_bbox(img, gt, out):
    # NOTE(review): this function is truncated -- the extracted snippet ends
    # before the drawing code that would use `gt` and `out`.
    # Set the figure size.
    plt.figure(figsize=(11,11))

    # Display the satellite image (BGR -> RGB).
    plt.imshow(img[:,:,::-1])

    # Grab the current axis (image) to draw on.
    current_axis = plt.gca()

    # Visualize the ground-truth bboxes (drawn in red).
Example #27
0
        # NOTE(review): this fragment begins mid if/elif chain -- the
        # branches above (e.g. the 'COCO' case) are not visible here.
        dataset = VOC_Dataset(train_roots, val_roots, test_roots)
    else:
        raise ValueError(
            "Wrong or unsupported dataset. Available 'COCO' or 'VOC'")
    # NOTE(review): "\n\Testing" contains a stray backslash (probably meant
    # "\n\t"); it is a runtime string, so it is left unchanged in this
    # documentation-only pass.
    print("\n\Testing on %s dataset" % (DATASET_NAME + TESTSET_YEAR))
    dataset.show_info()
    _ = input("Press Enter to continue...")

    ## 2. Dataloader initialization
    print("\t2. Dataloader initialization...")
    dataloader = Dataloader(dataset, TEST_SIZE)
    test_generator = dataloader.generate_batch("test")

    ## 3. Network initialization
    print("\t3. Network initialization...")
    # len(label_ids) + 1: presumably one extra slot for the background
    # class -- confirm against the SSD implementation.
    ssd = SSD(num_classes=len(dataset.label_ids) + 1, input_shape=INPUT_SHAPE)
    latest = tf.train.latest_checkpoint(CHECKPOINT_DIR)
    ssd.load_weights(latest)
    ssd.summary()
    _ = input("Press Enter to continue...")

    ## 4. Generate default boxes
    print("\t4. Default boxes generation...")
    fm_shapes = ssd.output_shape
    aspect_ratios = ASPECT_RATIOS
    scales = SCALES
    default_boxes = Image.generate_default_boxes(fm_shapes, aspect_ratios,
                                                 scales)
    # ---------------------------------------------------------------- #

    print("Initialization completed!")
Example #28
0
# BUG FIX: SummaryWriter was imported twice on consecutive lines; one copy
# removed.
from torch.utils.tensorboard import SummaryWriter
import torch
import torchvision
from torchvision import datasets, transforms
from torch.autograd import Variable
from ssd import SSD

# Export the (DataParallel-wrapped) SSD graph to TensorBoard.
ssd = SSD()
model = ssd.net.module
writer = SummaryWriter()
for i in range(5):
    # NOTE(review): add_graph is invoked five times with fresh random input;
    # a single call would normally suffice -- confirm the repetition is
    # intentional.
    images = torch.randn(4, 3, 300, 300).cuda()
    writer.add_graph(model, input_to_model=images, verbose=False)
writer.flush()
writer.close()
Example #29
0
                        help=help_)
    help_ = "Number of layers"
    parser.add_argument("-l", "--layers", default=6, type=int, help=help_)
    help_ = "Camera index"
    parser.add_argument("--camera", default=0, type=int, help=help_)
    help_ = "Record video"
    parser.add_argument("-r",
                        "--record",
                        default=False,
                        action='store_true',
                        help=help_)
    help_ = "Video filename"
    parser.add_argument("-f", "--filename", default="demo.mp4", help=help_)

    args = parser.parse_args()

    if args.tiny:
        ssd = SSD(n_layers=args.layers, normalize=args.normalize)
    else:
        ssd = SSD(n_layers=args.layers,
                  build_basenet=build_resnet,
                  normalize=args.normalize)

    if args.weights:
        ssd.load_weights(args.weights)
        videodemo = VideoDemo(detector=ssd,
                              camera=args.camera,
                              record=args.record,
                              filename=args.filename)
        videodemo.loop()
Example #30
0
from keras.layers import Input
from ssd import SSD
from PIL import Image

ssd = SSD()

# Interactive loop: prompt for a filename, run detection, show the result.
while True:
    filename = input('Input image filename:')
    try:
        image = Image.open(filename)
    except:
        print('Open Error! Try again!')
        continue
    result = ssd.detect_image(image)
    result.show()
# NOTE(review): unreachable -- the loop above never terminates normally.
ssd.close_session()
    
Example #31
0
#-------------------------------------#
#       Camera-based detection demo
#-------------------------------------#
from keras.layers import Input
from ssd import SSD
from PIL import Image
import numpy as np
import cv2
ssd = SSD()
# Open the default camera; pass a filename for a video file instead,
# e.g. capture = cv2.VideoCapture("1.mp4")
capture = cv2.VideoCapture(0)

while (True):
    # Grab one frame.
    ref, frame = capture.read()
    # BUG FIX: the original ignored `ref`; when the camera is missing or the
    # stream ends, `frame` is None and cv2.cvtColor crashed. Exit cleanly.
    if not ref:
        break
    # BGR (OpenCV) -> RGB.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Wrap as a PIL Image for the detector.
    frame = Image.fromarray(np.uint8(frame))

    # Run detection and convert the annotated image back to an array.
    frame = np.array(ssd.detect_image(frame))

    # RGB -> BGR so OpenCV displays colors correctly.
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    cv2.imshow("video", frame)
    c = cv2.waitKey(30) & 0xff
    if c == 27:  # ESC quits
        break

# Release the camera on every exit path (the original released it only on
# ESC) and close the display window.
capture.release()
cv2.destroyAllWindows()
Example #32
0
    dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}

    #SSD300の設定
    ssd_cfg = {
        #'num_classes': 21, #背景クラスを含めた合計クラス数
        'num_classes': 12,  #背景クラスを含めた合計クラス数
        'input_size': 300,  #画像の入力サイズ
        'bbox_aspect_num': [4, 6, 6, 6, 4, 4],  #出力するDBoxのアスペクト比の種類
        'feature_maps': [38, 19, 10, 5, 3, 1],  #各sourceの画像サイズ
        'steps': [8, 16, 32, 64, 100, 300],  #DBOXの大きさを決める
        'min_sizes': [30, 60, 111, 162, 213, 264],  #DBOXの大きさを決める
        'max_sizes': [60, 111, 162, 213, 264, 315],  #DBOXの大きさを決める
        'aspect_ratios': [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
    }
    #SSDネットワークモデル
    net = SSD(phase="train", cfg=ssd_cfg)

    #SSDの初期の重みを設定
    #ssdのvgg部分に重みをロードする
    vgg_weights = torch.load('./weights/vgg16_reducedfc.pth')
    net.vgg.load_state_dict(vgg_weights)

    #ssdのその他のネットワークの重みはHeの初期値で初期化
    def weights_init(m):
        if isinstance(m, nn.Conv2d):
            nn.init.kaiming_normal_(m.weight.data)
            if m.bias is not None:  #バイアス項がある場合
                nn.init.constant_(m.bias, 0.0)

    #Heの初期値を適応
    net.extras.apply(weights_init)