Example #1
def deploy_single():
    if not IS_GPU:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(0)

    folder = './phantast_val/2017-04-05_13-13-56_B1_universal'
    filename = 'crop_1_0'

    img_path = folder + '/' + filename + '.bmp'
    count = '_2018-10-12-1111_bat1_iter_182500'
    img_save_name = 'F:/MA_Yang/code/dump/' + filename + count
    net = caffe.Net(
        DEPLOY_PROTOTXT,
        caffe.TEST,
        weights=
        './snapshots/snapshot2018-10-12-1111_bat1_iter_182500.caffemodel')
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    color_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    padding = (IMG_EXT_SIZE - IMG_TILE_HEIGHT) / 2
    img = cv2.copyMakeBorder(img, padding, padding, padding, padding,
                             cv2.BORDER_REFLECT_101)
    seg_mask_ptry = 0
    seg_mask = np.zeros((IMG_TILE_WIDTH, IMG_TILE_WIDTH, 1), dtype=np.uint8)
    for y in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_HEIGHT + 1,
                         SEG_MASK_STRIDE):
        seg_mask_ptrx = 0
        for x in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_WIDTH + 1,
                             SEG_MASK_STRIDE):
            tmp = img[y:y + SEG_MASK_HEIGHT, x:x + SEG_MASK_WIDTH]
            net.blobs['data'].data[...] = transformer.preprocess('data', tmp)
            out = net.forward()
            result = net.blobs['argmax'].data[0, 0, :, :]
            seg_mask_height = result.shape[0]
            seg_mask_width = result.shape[1]
            seg_mask[seg_mask_ptry:seg_mask_ptry + seg_mask_height, seg_mask_ptrx:seg_mask_ptrx + seg_mask_width, 0] = \
                net.blobs['argmax'].data[0, 0, :, :]
            seg_mask_ptrx = seg_mask_ptrx + seg_mask_width
        seg_mask_ptry = seg_mask_ptry + seg_mask_height

    seg_mask = np.array(seg_mask).astype(np.uint8)
    x, y, z = np.nonzero(seg_mask == 1)
    seg_mask[x, y] = 255
    x, y, z = np.nonzero(seg_mask == 2)
    seg_mask[x, y] = 128
    print np.unique(seg_mask)

    _, contours, _ = cv2.findContours(seg_mask, cv2.RETR_TREE,
                                      cv2.CHAIN_APPROX_SIMPLE)
    color_img = cv2.drawContours(color_img, contours, -1, (0, 255, 255), 1)
    cv2.imwrite(img_save_name + '_contour.png', color_img)
    cv2.imwrite(img_save_name + '.png',
                seg_mask)  # save the images in the dump directory
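The sliding-window loops above rely on fc.my_range, which is not part of these examples. Judging from the C-style loop comments in the later examples (for (y=0; y<...; y+=...)), it behaves like a stride-aware range. A minimal sketch under that assumption; my_range here is a stand-in, not the project's actual helper:

def my_range(start, stop, step):
    # Yield start, start + step, ... while the value stays below stop,
    # mirroring a C-style for loop with an explicit increment.
    value = start
    while value < stop:
        yield value
        value += step

# e.g. list(my_range(0, 10, 4)) -> [0, 4, 8]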
Example #2
def return_dict_data(sta_date, end_date):
	sql_update_data_fixed 	= """ SELECT MIN(ord_id), MAX(ord_id) FROM orders_new 
								WHERE ord_date BETWEEN {} 
								AND {} """.format(sta_date, end_date)
	max_min_ord_id_fixed	= Data.Fetch_All(sql_update_data_fixed) 	
	dic_return_fixed 		= {}
	list_ord_id 			= []
	for i in functions.my_range(max_min_ord_id_fixed[0][0], max_min_ord_id_fixed[0][1],1000):
		# get estore_id
		lm			= i + 1000 # limit
		if (lm > max_min_ord_id_fixed[0][1]):
			lm = max_min_ord_id_fixed[0][1]
		if(lm <= max_min_ord_id_fixed[0][1]):
			sql_get = """SELECT ord_id, ord_estore_id, ord_code, ord_date FROM orders_new
						WHERE ord_id BETWEEN {0} 
						AND {1} 
						AND ord_estore_id IN({2})""".format(i, lm,','.join(GSpS.Get_Value_Gs_By_Range('')))
			dic 	= Data.Fetch_All(sql_get)
			for j in range(len(dic)): 
				est_id 		= dic[j][1]
				list_ord_id.append(dic[j][0]);
				dic_return_fixed.update({'orders_product_{0}'.format(est_id % 20) : list_ord_id}) 
	list_fixed 				= []		
	for k in dic_return_fixed:
		# convert the list of order ids to a comma-separated string
		str_ord_id 		= ', '.join(str(e) for e in dic_return_fixed[k])
		# SQL that fetches the details of these orders
		sql_info_order_fixed 	= """ SELECT ord_code, ord_phone, ord_estore_id, ord_status, pro_id, pro_category, op_price, op_quantity, onc_status, orr_source_referer FROM {0} 
									LEFT JOIN orders_new ON op_order_id = ord_id
									LEFT JOIN orders_new_checked ON ord_id = onc_order_id
									LEFT JOIN orders_referer ON ord_id = orr_order_id
									LEFT JOIN products_multi ON op_product_id = pro_id 
									WHERE op_order_id IN ({1}) """.format(k, str_ord_id)
		list_fixed.append(Data.Fetch_All(sql_info_order_fixed))		
	return list_fixed
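The loop above walks the ord_id range in windows of 1000 so no single query pulls the whole table. The same batching can be written as a small standalone generator; the sketch below is illustrative only (chunk_ranges is a hypothetical name, and unlike the lm logic above it yields non-overlapping windows, since BETWEEN is inclusive and the original windows share their boundary id):

def chunk_ranges(first_id, last_id, size=1000):
    # Yield (low, high) id windows covering [first_id, last_id] inclusively.
    low = first_id
    while low <= last_id:
        high = min(low + size, last_id)
        yield low, high
        low = high + 1

# e.g. list(chunk_ranges(1, 2500)) -> [(1, 1001), (1002, 2002), (2003, 2500)]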
Example #3
# Commented-out fragment of an earlier version of the per-tile statistics:
"""
                    count_b += len(xx)
                s = count_b + count_c + count_d
                count_b /= s
                count_c /= s
                count_d /= s
                data_back.append(count_b)
                data_cell.append(count_c)
                data_dead.append(count_d)
"""
for folder in SubDirPath(
        'D:/SublimeTextProject/MasterArbeit/data/phantast_test/'):
    for paths in FilesInDir(folder, r'((^crop_\d_\d_phantast_result.tif$))'):
        img = cv2.imread(paths, cv2.IMREAD_GRAYSCALE)

        for y in fc.my_range(
                0, IMG_EXT_SIZE - SEG_MASK_HEIGHT + 1, SEG_MASK_STRIDE
        ):  # for (y=0; y<IMG_EXT_SIZE - SEG_MASK_HEIGHT + 1; y+=SEG_MASK_HEIGHT)
            count_d = 0.0
            count_c = 0.0
            count_b = 0.0
            for x in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_WIDTH + 1,
                                 SEG_MASK_STRIDE):
                tmp = img[y:y + SEG_MASK_HEIGHT, x:x + SEG_MASK_WIDTH]
                xx, yy = np.nonzero(tmp == 255)
                if (xx.size != 0):
                    pixels_MSC += xx.size
                    count_cell += 1
                    count_c += len(xx)
                xx, yy = np.nonzero(tmp == 128)
                if (xx.size != 0):
                    pixels_dead += xx.size
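Counting the pixels of each gray value per tile, as the loop above does, can also be written with boolean sums. A minimal sketch assuming the same encoding (255 = cell, 128 = dead, everything else background = 0) and a NumPy array tile such as the tmp crop above:

def class_fractions(tile):
    # Fractions of cell, dead and background pixels in one tile.
    cell = float((tile == 255).sum())
    dead = float((tile == 128).sum())
    back = float((tile == 0).sum())
    total = cell + dead + back
    if total == 0:
        return 0.0, 0.0, 0.0
    return cell / total, dead / total, back / total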
Example #4
def deploy_Full_Image(img_path=DEPLOY_IMG,
                      caffemodel_path=DEPLOY_MODEL,
                      deploy_prototxt_path=DEPLOY_PROTOTXT):
    caffe.set_mode_gpu()
    caffe.set_device(0)

    start_time = time.time()
    img_data = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
    lx, ly = img_data.shape

    save_dir = './FullSegmentationResult/'
    net = caffe.Net(deploy_prototxt_path, caffe.TEST, weights=caffemodel_path)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    c = 0
    for yy in xrange(4):
        for xx in xrange(4):
            img_save_name = 'tile_' + str(c)
            img_crop = img_data[lx / 3 + xx * IMG_TILE_WIDTH:lx / 3 +
                                (xx + 1) * IMG_TILE_WIDTH,
                                ly / 3 + yy * IMG_TILE_WIDTH:ly / 3 +
                                (yy + 1) * IMG_TILE_WIDTH]

            color_img = cv2.cvtColor(img_crop, cv2.COLOR_GRAY2RGB)
            padding = (IMG_EXT_SIZE - IMG_TILE_HEIGHT) / 2
            img_crop = cv2.copyMakeBorder(img_crop, padding, padding, padding,
                                          padding, cv2.BORDER_REFLECT_101)
            seg_mask_ptry = 0
            seg_mask = np.zeros((IMG_TILE_WIDTH, IMG_TILE_WIDTH, 1),
                                dtype=np.uint8)

            for y in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_HEIGHT + 1,
                                 SEG_MASK_STRIDE):
                seg_mask_ptrx = 0
                for x in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_WIDTH + 1,
                                     SEG_MASK_STRIDE):
                    tmp = img_crop[y:y + SEG_MASK_HEIGHT, x:x + SEG_MASK_WIDTH]
                    net.blobs['data'].data[...] = transformer.preprocess(
                        'data', tmp)
                    out = net.forward()
                    result = net.blobs['argmax'].data[0, 0, :, :]
                    seg_mask_height = result.shape[0]
                    seg_mask_width = result.shape[1]
                    seg_mask[seg_mask_ptry:seg_mask_ptry + seg_mask_height, seg_mask_ptrx:seg_mask_ptrx + seg_mask_width, 0] = \
                        net.blobs['argmax'].data[0, 0, :, :]
                    seg_mask_ptrx = seg_mask_ptrx + seg_mask_width
                seg_mask_ptry = seg_mask_ptry + seg_mask_height

            seg_mask = np.array(seg_mask).astype(np.uint8)
            x, y, z = np.nonzero(seg_mask == 1)
            seg_mask[x, y] = 255
            x, y, z = np.nonzero(seg_mask == 2)
            seg_mask[x, y] = 128

            _, contours, _ = cv2.findContours(seg_mask, cv2.RETR_TREE,
                                              cv2.CHAIN_APPROX_SIMPLE)
            color_img = cv2.drawContours(color_img, contours, -1,
                                         (0, 255, 255), 1)
            cv2.imwrite(save_dir + img_save_name + '_contour.png', color_img)
            cv2.imwrite(save_dir + img_save_name + '.png',
                        seg_mask)  # save image in the directory
            c += 1

    end_time = time.time()
    image_processing = end_time - start_time
    print 'processing time : ' + str(image_processing)
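deploy_Full_Image writes the 16 masks as tile_0.png ... tile_15.png, with c = yy * 4 + xx and xx selecting the rows of the crop. If the tiles are later needed as one image, they could be stitched back together roughly as below; the output name mosaic.png is a hypothetical choice:

import cv2
import numpy as np

tiles = [cv2.imread('./FullSegmentationResult/tile_%d.png' % c,
                    cv2.IMREAD_GRAYSCALE) for c in xrange(16)]
# Tile c sits at row (c % 4) and column (c // 4) of the mosaic,
# because xx indexes the rows and yy the columns of the crop above.
columns = [np.vstack(tiles[yy * 4:(yy + 1) * 4]) for yy in xrange(4)]
mosaic = np.hstack(columns)
cv2.imwrite('./FullSegmentationResult/mosaic.png', mosaic)

With this ordering, tile 0 ends up in the top-left corner of the mosaic.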
Example #5
def create_lmdb(mode, txt_save_path, data_lmdb_path, label_lmdb_path, num_tiles):
    if os.path.exists(data_lmdb_path):
        shutil.rmtree(data_lmdb_path)
    if os.path.exists(label_lmdb_path):
        shutil.rmtree(label_lmdb_path)

    txtfile = open(txt_save_path, "r")
    imagelist = [line.split(' ')[0].replace('\n', '') for line in txtfile.readlines()]
    data = [img for img in imagelist if '_phantast_result' not in img]
    label = [img for img in imagelist if '_phantast_result' in img]

    print '\nCreating ' + mode
    # object dtype so each slot can hold a caffe Datum message
    data_datums = np.zeros(81 * NUM_AUG * num_tiles, dtype=object)
    label_datums = np.zeros(81 * NUM_AUG * num_tiles, dtype=object)
    counter = 0
    c = 0
    start_time = time.time()
    for data_path, label_path in itertools.izip(data, label):
        c+=1
        print data_path, label_path,c
        data_img = cv2.imread(data_path, cv2.IMREAD_GRAYSCALE)
        label_img = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)

        # pad image into 2236x2236
        padding = (IMG_EXT_SIZE - IMG_TILE_HEIGHT) / 2
        data_img = cv2.copyMakeBorder(data_img, padding, padding, padding, padding, cv2.BORDER_REFLECT_101)
        label_img = cv2.copyMakeBorder(label_img, padding, padding, padding, padding, cv2.BORDER_REFLECT_101)

        # save image and label as 2xCxHxW
        img = np.zeros((2, 1, IMG_EXT_SIZE, IMG_EXT_SIZE), dtype=np.uint8)
        img[0, 0, :, :] = data_img
        sx, sy = np.nonzero(label_img == 255)
        if sx.size !=0:
            label_img[sx, sy] = 1
        sx, sy = np.nonzero(label_img == 128)
        if sx.size !=0:
            label_img[sx, sy] = 2
        img[1, 0, :, :] = label_img

        del sx,sy,data_img,label_img

        for y in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_HEIGHT + 1,
                             SEG_MASK_STRIDE):  # for (y=0; y<IMG_EXT_SIZE - SEG_MASK_HEIGHT + 1; y+=SEG_MASK_HEIGHT)
            for x in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_WIDTH + 1, SEG_MASK_STRIDE):
                tmp = img[:, 0, y:y + SEG_MASK_HEIGHT, x:x + SEG_MASK_WIDTH]

                augimg = aug.DoAugmentation(tmp)
                del tmp

                for i in xrange(len(augimg)):
                    datum = caffe_pb2.Datum()
                    datum.channels = 1
                    datum.height = SEG_MASK_HEIGHT
                    datum.width = SEG_MASK_WIDTH
                    datum.data = augimg[i, 0, :, :].astype(np.uint8).tobytes()  # or .tostring() if numpy < 1.9
                    datum.label = 0
                    data_datums[counter] = datum

                    datum_label = caffe_pb2.Datum()
                    datum_label.channels = 1
                    datum_label.height = SEG_MASK_HEIGHT
                    datum_label.width = SEG_MASK_WIDTH
                    datum_label.data = augimg[i, 1, :, :].astype(np.uint8).tobytes()  # or .tostring() if numpy < 1.9
                    datum_label.label = 0
                    label_datums[counter] = datum_label
                    counter += 1
                    del datum_label, datum
                    
                del augimg

    end_time = time.time()
    print end_time - start_time

    if mode == 'val':
        map_size = 2e9
    else :
        map_size = 8e9

    data_datums, label_datums = shuffle(data_datums, label_datums)
    
    in_db = lmdb.open(data_lmdb_path, map_size=float(map_size))
    with in_db.begin(write=True) as in_txn:
        for count in xrange(len(data_datums)):
            in_txn.put('{:05}'.format(count).encode('ascii'), (data_datums[count]).SerializeToString())
    in_db.close()
    
    in_db = lmdb.open(label_lmdb_path, map_size=float(map_size))
    with in_db.begin(write=True) as in_txn:
        for count in xrange(len(label_datums)):
            in_txn.put('{:05}'.format(count).encode('ascii'), label_datums[count].SerializeToString())
    in_db.close()
    
    print '\nFinished processing all ' + mode + ' images'
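A quick way to sanity-check the databases written above is to read an entry back and decode it. The sketch below assumes the standard lmdb and pycaffe APIs (caffe.io.datum_to_array turns a Datum back into a channels x height x width array); TRAIN_DATA_LMDB is a placeholder for whatever data_lmdb_path was used:

import lmdb
import caffe
from caffe.proto import caffe_pb2

env = lmdb.open(TRAIN_DATA_LMDB, readonly=True)  # path is an assumption
with env.begin() as txn:
    cursor = txn.cursor()
    for key, value in cursor:
        datum = caffe_pb2.Datum()
        datum.ParseFromString(value)
        tile = caffe.io.datum_to_array(datum)  # (1, SEG_MASK_HEIGHT, SEG_MASK_WIDTH)
        print key, tile.shape, tile.dtype
        break  # only inspect the first entry
env.close()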
Example #6
def CalculateMetricPerformance(root=CROP_PHANTAST_TEST):
    caffe.set_mode_gpu()
    caffe.set_device(0)
    file_list = []
    for subdir in fc.SubDirPath(root):
        file_list.append(fc.FilesInDir(subdir, r'((^crop_\d_\d_phantast_result.tif$)|(^crop_\d_\d.bmp$))'))

    file_list = [item for sublist in file_list for item in sublist]

    data_files = [file for file in file_list if '_phantast_result' not in file]
    label_files = [file for file in file_list if '_phantast_result' in file]
    print 'test:', len(data_files)
    print 'test', len(label_files)

    net = caffe.Net(DEPLOY_PROTOTXT, caffe.TEST, weights='snapshot2018-10-12-1111_bat1_iter_178500.caffemodel')
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    
    cm = 0
    img_count = 0
    f1_avg = np.array([0.0,0.0,0.0], dtype='f')
    recall_avg = np.array([0.0,0.0,0.0], dtype='f')
    precision_avg = np.array([0.0,0.0,0.0], dtype='f')
    
    for img_path in data_files:
        print img_path
        label_path = img_path.replace('.bmp', '_phantast_result.tif')
        img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        #img = cv2.GaussianBlur(img, (13, 13), 0)

        #cv2.imwrite(str(img_count) + '_blur13.bmp', img)  # save image in the directory

        
        label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)
        padding = (IMG_EXT_SIZE - IMG_TILE_HEIGHT) / 2
        img = cv2.copyMakeBorder(img, padding, padding, padding, padding, cv2.BORDER_REFLECT_101)
        pred_mask_ptry = 0
        pred = np.zeros((IMG_TILE_WIDTH, IMG_TILE_WIDTH, 1), dtype=np.uint8)
        for y in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_HEIGHT + 1, SEG_MASK_STRIDE):
            pred_mask_ptrx = 0
            for x in fc.my_range(0, IMG_EXT_SIZE - SEG_MASK_WIDTH + 1, SEG_MASK_STRIDE):
                tmp_cm = img[y:y + SEG_MASK_HEIGHT, x:x + SEG_MASK_WIDTH]
                net.blobs['data'].data[...] = transformer.preprocess('data', tmp_cm)
                out = net.forward()
                result = net.blobs['argmax'].data[0, 0, :, :]
                pred_mask_height = result.shape[0]
                pred_mask_width = result.shape[1]
                pred[pred_mask_ptry:pred_mask_ptry + pred_mask_height, pred_mask_ptrx:pred_mask_ptrx + pred_mask_width, 0] = \
                    net.blobs['argmax'].data[0, 0, :, :]
                pred_mask_ptrx = pred_mask_ptrx + pred_mask_width
            pred_mask_ptry = pred_mask_ptry + pred_mask_height

        pred = np.array(pred).astype(np.uint8)
        x, y, z = np.nonzero(pred == 1)
        pred[x, y] = 255
        x, y, z = np.nonzero(pred == 2)
        pred[x, y] = 128

        cv2.imwrite(str(img_count) + '.png', pred)  # save image in the directory

        pred = pred.flatten()
        label = label.flatten()
        tmp_cm = confusion_matrix(label, pred, labels=[0, 128, 255])
        cm += tmp_cm.astype('float') / tmp_cm.sum(axis=1)[:, np.newaxis]  # normalized

        tmp_cr = classification_report(label, pred, output_dict=True)
            
        recall_avg[0] += tmp_cr['0']['recall']
        recall_avg[1] += tmp_cr['128']['recall']
        recall_avg[2] += tmp_cr['255']['recall']
        f1_avg[0] += tmp_cr['0']['f1-score']
        f1_avg[1] += tmp_cr['128']['f1-score']
        f1_avg[2] += tmp_cr['255']['f1-score']
        precision_avg[0] += tmp_cr['0']['precision']
        precision_avg[1] += tmp_cr['128']['precision']
        precision_avg[2] += tmp_cr['255']['precision']

        print tmp_cm, tmp_cr,img_count
        
        img_count +=1
        
    
    cm = cm / img_count
    recall_avg = recall_avg / img_count
    f1_avg = f1_avg / img_count
    precision_avg = precision_avg / img_count

    print 'Confusion Matrix: \n', cm
    print 'recall_avg: \n', recall_avg
    print 'f1_avg: \n', f1_avg
    print 'precision_avg: \n', precision_avg
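Since the script already accumulates a confusion matrix, per-class precision, recall and F1 could also be derived directly from the raw counts instead of averaging classification_report outputs per image. A minimal sketch of that alternative, assuming cm_counts is the sum of the un-normalized tmp_cm matrices (the cm kept above is row-normalized, so it would not work here unchanged):

import numpy as np

def precision_recall_from_cm(cm_counts):
    # cm_counts[i, j]: number of pixels of true class i predicted as class j
    cm_counts = cm_counts.astype(np.float64)
    tp = np.diag(cm_counts)
    precision = tp / np.maximum(cm_counts.sum(axis=0), 1e-12)  # column sums = predicted totals
    recall = tp / np.maximum(cm_counts.sum(axis=1), 1e-12)     # row sums = ground-truth totals
    f1 = 2 * precision * recall / np.maximum(precision + recall, 1e-12)
    return precision, recall, f1

This variant weights every pixel equally, whereas averaging the per-image reports above weights every image equally.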