Code example #1
def runOnFrames(vid_frames):
    print("hi")
    param, model = config_reader()
    p_left = []
    p_right = []
    caffe.set_mode_cpu()
    net = caffe.Net(model['deployFile'], model['caffemodel'], caffe.TEST)
    f = open("./Key Points/new_File.txt", "w+")
    # resize every incoming frame to a common size before running the network
    for i in range(len(vid_frames)):
        vid_frames[i] = cv2.resize(vid_frames[i], (640, 480),
                                   interpolation=cv2.INTER_LINEAR)

    for j in range(len(vid_frames)):

        multiplier = [
            x * model['boxsize'] / vid_frames[j].shape[0]
            for x in param['scale_search']
        ]
        all_kp = find_kp(vid_frames[j], model, net, param, multiplier, p_left,
                         p_right)
        print(all_kp)
        f.write(str(all_kp) + "\n")

    f.close()
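
Every snippet in this collection calls config_reader() without showing its definition. As a point of reference, here is a minimal sketch of what such a reader could look like for the pose-estimation examples, which expect a (param, model) pair of dicts. The file name, section names, and parsing details below are assumptions; only the key names are taken from the examples themselves (use_gpu, GPUdeviceNumber, scale_search, thre1, thre2, boxsize, stride, padValue, np, deployFile, caffemodel). Other projects in this list (e.g. the Spider.py examples) use a config_reader that returns a single flat dict instead.

# Hypothetical sketch of config_reader(); the [param]/[models] layout is an
# assumption, only the key names come from the surrounding examples.
from configparser import ConfigParser


def config_reader(config_path='config'):
    cfg = ConfigParser()
    cfg.read(config_path)

    param = {
        'use_gpu': cfg.getint('param', 'use_gpu'),
        'GPUdeviceNumber': cfg.getint('param', 'GPUdeviceNumber'),
        'scale_search': [float(s) for s in
                         cfg.get('param', 'scale_search').split(',')],
        'thre1': cfg.getfloat('param', 'thre1'),
        'thre2': cfg.getfloat('param', 'thre2'),
    }
    model = {
        'boxsize': cfg.getint('models', 'boxsize'),
        'stride': cfg.getint('models', 'stride'),
        'padValue': cfg.getint('models', 'padValue'),
        'np': cfg.getint('models', 'np'),
        'deployFile': cfg.get('models', 'deployFile'),
        'caffemodel': cfg.get('models', 'caffemodel'),
    }
    return param, model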
Code example #2
def get_mid_outputs(model_output):

    picsize = [64, 64]
    picsize_bf = [512, 512]
    param, model_params = config_reader()

    multiplier = [
        x * model_params['boxsize'] / picsize_bf[0]
        for x in param['scale_search']
    ]

    heatmap_avg = np.zeros((picsize_bf[0], picsize_bf[1], 19))
    paf_avg = np.zeros((picsize_bf[0], picsize_bf[1], 38))

    for m in range(len(multiplier)):
        scale = multiplier[m]
        imageToTest = cv2.resize(model_output, (0, 0),
                                 fx=scale,
                                 fy=scale,
                                 interpolation=cv2.INTER_CUBIC)
        imageToTest_padded, pad = util.padRightDownCorner(
            imageToTest, model_params['stride'], model_params['padValue'])

        input_img = np.transpose(
            np.float32(imageToTest_padded[:, :, :, np.newaxis]),
            (3, 0, 1, 2))  # required shape (1, width, height, channels)
        #print("Input shape: " + str(input_img.shape))

        output_blobs = model_output
        #print("Output shape (heatmap): " + str(output_blobs[1].shape))

        # extract outputs, resize, and remove padding
        heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
        heatmap = cv2.resize(heatmap, (0, 0),
                             fx=model_params['stride'],
                             fy=model_params['stride'],
                             interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[:imageToTest_padded.shape[0] -
                          pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        heatmap = cv2.resize(heatmap, (picsize_bf[1], picsize_bf[0]),
                             interpolation=cv2.INTER_CUBIC)

        paf = np.squeeze(output_blobs[0])  # output 0 is PAFs
        paf = cv2.resize(paf, (0, 0),
                         fx=model_params['stride'],
                         fy=model_params['stride'],
                         interpolation=cv2.INTER_CUBIC)
        paf = paf[:imageToTest_padded.shape[0] -
                  pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        paf = cv2.resize(paf, (picsize_bf[1], picsize_bf[0]),
                         interpolation=cv2.INTER_CUBIC)

        heatmap_avg = heatmap_avg + heatmap / len(multiplier)
        paf_avg = paf_avg + paf / len(multiplier)

        #pcm_cmp = pcm_cmp.reshape(picsize[0],picsize[1],19).astype("float32")
        #paf_cmp = paf_cmp.reshape(picsize[0],picsize[1],38).astype("float32")

    return heatmap_avg, paf_avg
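
Many of the snippets pad the scaled image with util.padRightDownCorner(img, stride, padValue) and later crop the network output using pad[2] (bottom) and pad[3] (right). The helper itself never appears in this collection; the sketch below is an assumed re-implementation based only on how the examples consume its return values, i.e. pad is taken to be [up, left, down, right] padding that makes both spatial dimensions a multiple of stride.

import numpy as np


# Assumed re-implementation of util.padRightDownCorner; not the original source.
def pad_right_down_corner(img, stride, pad_value):
    h, w = img.shape[0], img.shape[1]
    pad = [0,                                                  # up
           0,                                                  # left
           0 if h % stride == 0 else stride - (h % stride),    # down
           0 if w % stride == 0 else stride - (w % stride)]    # right

    # allocate the padded canvas and copy the image into its top-left corner
    img_padded = np.full((h + pad[2], w + pad[3]) + img.shape[2:],
                         pad_value, dtype=img.dtype)
    img_padded[:h, :w] = img
    return img_padded, pad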
Code example #3
 def __init__(self):
     self.param, self.model = config_reader()
     self.input_node = tf.placeholder(tf.float32,shape=(1, None, None, 3))
     self.net = models.Skeleton({'data': self.input_node})
     self.sess = tf.Session()
     print('Loading the model')
     self.net.load("/s/parsons/h/proj/vision/usr/prady/caffe-tensorflow/examples/imagenet/data.npy", self.sess)
     print "Loading done"
Code example #4
 def __init__(self):
     self.EMAIL_LOG_NAME = 'email_log.ini'
     self.cr = config_reader()
     self.email_config = self.cr.get_email_settings()
     self.email_log_dir = self.cr.get_email_log()
     self.feedback = feedback()
     self.late_submission_list = late_submit_checker().late_submitter_log()
     self.late_penalty = int(self.cr.get_late_submit_penalty())
Code example #5
 def load_model(self):
     """
     Load the model.
     """
     model = get_testing_model()
     model.load_weights(self.weights_path)
     params, model_params = config_reader(self.config_path)
     return model, params, model_params
Code example #6
def Hand_Inference(oriImg, Model=None, Name=""):
    num_classes = 22
    if Model is None:
        model = openpose_hand(num_classes=num_classes)
        model.load_state_dict(torch.load(weight_name))
        model = torch.nn.DataParallel(model).cuda().float()
        model.eval()
    else:
        model = Model
    param_, model_ = config_reader('config_hand')

    #torch.nn.functional.pad(img pad, mode='constant', value=model_['padValue'])
    # tic = time.time()

    #test_image = 'a.jpg'
    with torch.no_grad():
        # imageToTest = Variable(T.unsqueeze(torch.from_numpy(oriImg.transpose(2,0,1)).float(),0)).cuda()
        # multiplier = [x * model_['boxsize'] / oriImg.shape[0] for x in param_['scale_search']]
        heatmap_avg = torch.zeros(1, num_classes, oriImg.shape[0],
                                  oriImg.shape[1]).cuda()

    imageToTest_padded = oriImg[:, :, :, np.newaxis].transpose(
        3, 2, 0, 1).astype(np.float32) / 255.0 - 0.5

    with torch.no_grad():
        feed = T.from_numpy(imageToTest_padded).cuda()
        print("img:", feed.sum())
        output2 = model({'img': feed})['heatmap'][-1]
    print(output2.size(), output2.sum())
    heatmap = output2
    heatmap_avg[0] = heatmap[0].data  # single scale only; the multi-scale loop is commented out above

    heatmap_avg = T.mean(heatmap_avg, 0)
    heatmap_avg = T.transpose(T.transpose(T.squeeze(heatmap_avg), 0, 1), 1,
                              2).cuda()
    heatmap_avg = heatmap_avg.cpu().numpy()

    # toc =time.time()
    # print 'time is %.5f'%(toc-tic)
    # tic = time.time()

    all_peaks = []
    peak_counter = 0

    #maps =
    keypoint_coords = np.zeros((21, 2))
    for part in range(21):
        map_ori = heatmap_avg[:, :, part]
        plt.imshow(oriImg[:, :, [2, 1, 0]])
        plt.imshow(heatmap_avg[:, :, part], alpha=.3)
        plt.savefig("demo_heat/test_part_" + str(part) + ".png")
        # plt.savefig("demo_heat/test_part_"+ str(part) +"for" + '_'.join(Name.split('/')[-1].split(".")) + ".png")

        v, u = np.unravel_index(np.argmax(map_ori), map_ori.shape)
        keypoint_coords[part, 0] = u
        keypoint_coords[part, 1] = v

    return keypoint_coords
Code example #7
 def __init__(self):
     cmd2.Cmd.__init__(self)
     self.cr = config_reader()
     self.utility = utility()
     self.feedback = feedback()
     self.statistics = statistics()
     self.es = email_sender()
     self.late_submit_checker = late_submit_checker()
     os.chdir('ASSIGNMENTS')
Code example #8
 def __init__(self):
     self.param, self.model = config_reader()
     if self.param['use_gpu'] == 1:
         caffe.set_device(0)
         caffe.set_mode_gpu()
     else:
         caffe.set_mode_cpu()
     self.net = caffe.Net(self.model['deployFile'],
                          self.model['caffemodel'], caffe.TEST)
Code example #9
def transformation():
    """Do an inference on a single batch of data. In this sample server, we take data as CSV, convert
    it to a pandas data frame for internal use and then convert the predictions back to CSV (which really
    just means one prediction per line, since there's a single column.
    """
    encoded = flask.request.data
    nparr = np.frombuffer(encoded, np.uint8)
    oriImg = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    param, model_params = config_reader()
    scale = [x * model_params['boxsize'] / oriImg.shape[0] for x in param['scale_search']][3]                      
    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
    #paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))

    imageToTest = cv2.resize(oriImg, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_params['stride'], model_params['padValue'])        

    input_img = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,0,1,2)) # required shape (1, width, height, channels) 
    print("Input shape: " + str(input_img.shape))
    output_blobs = ScoringService.predict(input_img)
    heatmap = np.squeeze(output_blobs[1]) # output 1 is heatmaps
    heatmap = cv2.resize(heatmap, (0,0), fx=model_params['stride'], fy=model_params['stride'], interpolation=cv2.INTER_CUBIC)
    heatmap = heatmap[:imageToTest_padded.shape[0]-pad[2], :imageToTest_padded.shape[1]-pad[3], :]
    heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)

    heatmap_avg = heatmap_avg + heatmap
    all_peaks = []
    peak_counter = 0
    for part in range(19-1):
        map_ori = heatmap_avg[:,:,part]
        map = gaussian_filter(map_ori, sigma=3)

        map_left = np.zeros(map.shape)
        map_left[1:,:] = map[:-1,:]
        map_right = np.zeros(map.shape)
        map_right[:-1,:] = map[1:,:]
        map_up = np.zeros(map.shape)
        map_up[:,1:] = map[:,:-1]
        map_down = np.zeros(map.shape)
        map_down[:,:-1] = map[:,1:]

        peaks_binary = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map > param['thre1']))
        peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
        peaks_with_score = [x + (map_ori[x[1],x[0]],) for x in peaks]
        id = range(peak_counter, peak_counter + len(peaks))
        peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)
        
    predictions = all_peaks
    # Convert from numpy back to CSV
    out = StringIO.StringIO()
    pd.DataFrame({'results':predictions}).to_csv(out, header=False, index=False)
    result = out.getvalue()

    return flask.Response(response=result, status=200, mimetype='text/csv')
Code example #10
def get_open_pose():

    #load model
    model = get_testing_model()
    model.load_weights(keras_weights_file)

    # load config
    params, model_params = config_reader()

    return model, params, model_params
Code example #11
def initialization():
    param, model = config_reader()

    if param['use_gpu']:
        caffe.set_mode_gpu()
        caffe.set_device(param['GPUdeviceNumber'])  # set to your device!
    else:
        caffe.set_mode_cpu()
    net = caffe.Net(model['deployFile'], model['caffemodel'], caffe.TEST)

    return net, param, model
Code example #12
    def _heatmap(self, img):
        ############################### Setup the heatmap vars. ########################
        param, model_params = config_reader()
        multiplier = [
            x * model_params['boxsize'] / img.shape[0]
            for x in param['scale_search']
        ]

        ############################### Get the heatmap ################################
        heatmap_avg = np.zeros((img.shape[0], img.shape[1], 19))
        paf_avg = np.zeros((img.shape[0], img.shape[1], 38))

        for m in range(len(multiplier)):
            scale = multiplier[m]
            imageToTest = cv2.resize(img, (0, 0),
                                     fx=scale,
                                     fy=scale,
                                     interpolation=cv2.INTER_CUBIC)
            imageToTest_padded, pad = util.padRightDownCorner(
                imageToTest, model_params['stride'], model_params['padValue'])

            input_img = np.transpose(
                np.float32(imageToTest_padded[:, :, :, np.newaxis]),
                (3, 0, 1, 2))  # required shape (1, width, height, channels)

            output_blobs = self.model.predict(input_img)

            # extract outputs, resize, and remove padding
            heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=model_params['stride'],
                                 fy=model_params['stride'],
                                 interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:imageToTest_padded.shape[0] -
                              pad[2], :imageToTest_padded.shape[1] - pad[3], :]
            heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

            paf = np.squeeze(output_blobs[0])  # output 0 is PAFs
            paf = cv2.resize(paf, (0, 0),
                             fx=model_params['stride'],
                             fy=model_params['stride'],
                             interpolation=cv2.INTER_CUBIC)
            paf = paf[:imageToTest_padded.shape[0] -
                      pad[2], :imageToTest_padded.shape[1] - pad[3], :]
            paf = cv2.resize(paf, (img.shape[1], img.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

            # visualization
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)

        return (heatmap_avg, paf_avg)
Code example #13
def import_param():
    #Importing the configuration model :
    param, model = config_reader()
    print("param :", param)
    print("model :", model)
    if param['use_gpu']:
        caffe.set_mode_gpu()
        caffe.set_device(param['GPUdeviceNumber'])  # set to your device!
    else:
        caffe.set_mode_cpu()
    net = caffe.Net(model['deployFile'], model['caffemodel'], caffe.TEST)
    print("Net Loaded")
    return (param, model, net)
Code example #14
File: Spider.py  Project: HuAndrew/hot_weibo
def main():
    # interval = 5 minutes
    # total time: about 24 hours
    #get proper userid and weiboid
    filter = 1  # 0: crawl all weibo posts (original + reposts); 1: crawl original posts only

    ##test1
    param = config_reader()
    user_ids = param['userids']
    cookie = {"Cookie": ""}
    cookie['Cookie'] = param['cookie']

    proper_infos = get_proper_info(cookie, user_ids)
    print proper_infos
Code example #15
def segmentation(model, input_folder, output_folder, scale):
    keras_weights_file = model

    print('start processing...')
    # load model
    model = get_testing_model_resnet101()
    model.load_weights(keras_weights_file)
    params, model_params = config_reader()
    scale_list = []
    for item in scale:
        scale_list.append(float(item))

    params['scale_search'] = scale_list
    seg_dict = {}
    kpts_dict = {}

    # generate image with body parts
    for filename in os.listdir(input_folder):
        if filename.endswith(".png") or filename.endswith(".jpg"):
            print(input_folder + '/' + filename)

            #------------------This is what you need------------------------------------------------
            #kpts should contain what you need
            canvas, seg, kpts = process(input_folder + '/' + filename, params,
                                        model_params, model)
            #specifically, it is a dictionary with keys 1, 2, and 5 (rather arbitrary for now)
            assert 1 in kpts.keys()
            assert 2 in kpts.keys()
            assert 5 in kpts.keys()
            #kpts[1] should be a tuple of neck coords, kpts[2] left shoulder, and kpts[5] right shoulder
            #Use them in cv2 order, which is to say the tuples should be ordered (ycoord, xcoord)
            # ------------------This is what you need------------------------------------------------

            cv2.imwrite(output_folder + '/sk_' + filename, canvas)

            seg_argmax = np.argmax(seg, axis=-1)
            seg_max = np.max(seg, axis=-1)
            seg_max_thres = (seg_max > 0.1).astype(np.uint8)
            seg_argmax *= seg_max_thres

            seg_dict[filename] = seg_argmax
            kpts_dict[filename] = kpts
            #not completely necessary

            filename = '%s/%s.jpg' % (output_folder,
                                      'seg_' + os.path.splitext(filename)[0])
            cv2.imwrite(filename, seg_argmax)

    return seg_dict, kpts_dict
Code example #16
def Pose(Img_path):

    input_image = Img_path

    print('start processing...')

    params, model_params = config_reader()
    res = process(input_image, params, model_params)
    canvas = res[0]
    people_count = res[1]
    print(people_count)
    canvas = cv2.resize(canvas,(256,256),interpolation=cv2.INTER_CUBIC)
    org = cv_imread(input_image)
    org = cv2.resize(org,(256,256),interpolation=cv2.INTER_CUBIC)
    return canvas,people_count
Code example #17
File: CPM_demo.py  Project: shihenw/cpm-website
def detect(filename):
    param, model = config_reader()

    image = cv.imread(filename)
    if image.shape[0] > 500:
        scale = 500.0 / image.shape[0]
        image = cv.resize(image, (0, 0),
                          fx=scale,
                          fy=scale,
                          interpolation=cv.INTER_CUBIC)
        cv.imwrite(filename, image)
    #print sys.argv[1]
    #test_image = sys.argv[1] #'../sample_image/im1429.jpg'
    heatmaps, prediction = applymodel(filename, param, model)
    visualize_save(filename, heatmaps, prediction, param, model)  # save images
Code example #18
def main():
    # read in config for steam launch options manger
    config = config_reader.config_reader()
    config.read()
    # read in steam config
    p = steam_config_utils.config_getter()
    data = p.open_file(config.steam_path)
    # create backup
    p.backup_config(data, initialise.initialise().get_backup_path(),
                    "localconfig.vdf.backup"+str(int(time.time())))
    # modify steam backup
    modifer = modify_config.modifier(config.rules)
    modifer.modify(data)
    print("Saving modified file")
    p.save_file(data, config.steam_path)
    pass
Code example #19
    def __init__(self):
        # init model
        self.param, self.model = config_reader()
        # multiplier = [x * model['boxsize'] / oriImg.shape[0] for x in param['scale_search']]
        if self.param['use_gpu']:
            print "gpu model"
            caffe.set_mode_gpu()
            caffe.set_device(
                self.param['GPUdeviceNumber'])  # set to your device!
        else:
            print "cpu model"
            caffe.set_mode_cpu()

        self.net = caffe.Net(self.model['deployFile'],
                             self.model['caffemodel'], caffe.TEST)
        self.resultImagePath = ""
Code example #20
def std_main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--image', type=str, required=True, help='input image')
    parser.add_argument('--output',
                        type=str,
                        default='result.png',
                        help='output image')
    parser.add_argument('--model',
                        type=str,
                        default='model/keras/model.h5',
                        help='path to the weights file')

    args = parser.parse_args()
    image_path = args.image
    output = args.output
    keras_weights_file = args.model

    tic = time.time()
    print('start processing...')

    # load model

    # authors of original model don't use
    # vgg normalization (subtracting mean) on input images
    model = get_testing_model()
    model.load_weights(keras_weights_file)

    # load config
    params, model_params = config_reader()

    input_image = cv2.imread(image_path)  # B,G,R order

    all_peaks, subset, candidate = extract_parts(input_image, params, model,
                                                 model_params)
    canvas = draw(input_image, all_peaks, subset, candidate)

    toc = time.time()
    print('processing time is %.5f' % (toc - tic))

    cv2.imwrite(output, canvas)

    cv2.destroyAllWindows()
Code example #21
def predict_img(image_path, model, output_img, output_pos):
    # load config
    params, model_params = config_reader()

    input_image = cv2.imread(image_path)  # B,G,R order

    all_peaks, subset, candidate = extract_parts(input_image, params, model,
                                                 model_params)

    for peak in all_peaks:
        if peak:
            write_line(output_pos, "{}".format(peak[0]))
        else:
            write_line(output_pos, "")

    # canvas = draw(input_image, all_peaks, subset, candidate)
    canvas = draw_skeleton(input_image, subset, candidate)

    cv2.imwrite(output_img, canvas)
    cv2.destroyAllWindows()
Code example #22
    def __init__(self, str):
        # This is to be able to have two instances at the same time
        self.str = str
        workspace.SwitchWorkspace(self.str, True)
        #Inizializate the parameters and the Caffe2 files
        self.param, self.model = config_reader()
        INIT_NET = os.path.join(CAFFE_MODELS, "init_net.pb")
        print 'INIT_NET = ', INIT_NET
        PREDICT_NET = os.path.join(CAFFE_MODELS, "predict_net.pb")
        print 'PREDICT_NET = ', PREDICT_NET
        self.device_opts = core.DeviceOption(caffe2_pb2.CUDA, 0)
        self.init_def = caffe2_pb2.NetDef()
        with open(INIT_NET) as f:
            self.init_def.ParseFromString(f.read())
            self.init_def.device_option.CopyFrom(self.device_opts)
            workspace.RunNetOnce(self.init_def.SerializeToString())

        self.net_def = caffe2_pb2.NetDef()
        with open(PREDICT_NET) as f:
            self.net_def.ParseFromString(f.read())
            self.net_def.device_option.CopyFrom(self.device_opts)
            workspace.CreateNet(self.net_def.SerializeToString(), True)
Code example #23
File: predictor.py  Project: SS-JIA/openpose-py
    def __init__(self, config_path=default_config):
        self.config_path = config_path

        ## Load the model parameters from config file
        self.param, self.model = config_reader(self.config_path)

        ## Set up Caffe model
        if self.param['use_gpu']:
            caffe.set_mode_gpu()
            caffe.set_device(
                self.param['GPUdeviceNumber'])  # set to your device!
        else:
            caffe.set_mode_cpu()

        self.net = caffe.Net(self.model['deployFile'],
                             self.model['caffemodel'], caffe.TEST)

        ## Pose Model Specs
        self.num_keypoints = int(self.model['np'])
        self.limb_from = [int(x) for x in self.model['limb_from']]
        self.limb_to = [int(x) for x in self.model['limb_to']]
        self.limb_order = [int(x) for x in self.model['limb_order']]
        self.num_limbs = len(self.limb_from)
Code example #24
 def setup(self, bottom, top):
     if len(bottom) != 1:
         raise Exception('must have exactly one input')
     if len(top) != 9:
         raise Exception('must have exactly nine outputs')
     self.param, self.model = config_reader()
     boxsize = self.model['boxsize']
     npart = self.model['np']
     if self.param['use_gpu']:
         caffe.set_mode_gpu()
     else:
         caffe.set_mode_cpu()
     caffe.set_device(self.param['GPUdeviceNumber'])
     self.pose_net = caffe.Net(
         self.model['deployFile'], self.model['caffemodel'],
         caffe.TEST)  #architecture and learnt model file
     self.pose_net.forward()
     self.factor = 2  # factor to scale the input image
     self.batch = bottom[0].shape[0]
     self.chanels = bottom[0].shape[1]
     self.height = bottom[0].shape[
         2] * self.factor  # input image height after scaling
     self.width = bottom[0].shape[
         3] * self.factor  # input image width after scaling
Code example #25
    def __init__(self):
        params, model = config_reader()
        self.boxsize = model['boxsize']
        self.center = self.boxsize / 2
        self.npart = model['np']
        self.target_height = float(model['target_height'])

        # caffe.reset_all()
        if params['use_gpu']:
            caffe.set_mode_gpu()
            caffe.set_device(params['GPUdeviceNumber'])
        else:
            caffe.set_mode_cpu()
        self.hand_net = caffe.Net(model['deployFile'], model['caffemodel'], caffe.TEST)
        # self.hand_net = caffe.Net(model['deployFile'], caffe.TEST)
        # self.hand_net.copy_from(model['caffemodel'])
        self.hand_net.forward()
        self.prediction = np.zeros((self.npart, 2))
        self.gaussian_map = np.zeros((self.boxsize, self.boxsize))
        for y_p in range(self.boxsize):
            for x_p in range(self.boxsize):
                dist_sq = (x_p - self.center) * (x_p - self.center) + (y_p - self.center) * (y_p - self.center)
                exponent = dist_sq / 2.0 / model['sigma'] / model['sigma']
                self.gaussian_map[y_p, x_p] = math.exp(-exponent)
Code example #26
File: Spider.py  Project: HuAndrew/hot_weibo
def task():
    try:
        filter = 1  # 0: crawl all weibo posts (original + reposts); 1: crawl original posts only
        param = config_reader()
        user_ids = param['userids']
        cookie = {"Cookie": ""}
        cookie['Cookie'] = param['cookie']

        #proper_infos = get_proper_info(cookie,user_ids)
        #print proper_infos

        #proper_infos = [['1197191492', 'M_GcGGXEkxM'], ['5127716917', 'M_GctZ0xuOz'], ['1886437464', 'M_GclEqckbg'], ['1337925752', 'M_GcGBvizIY'], ['1864507535', 'M_GcnRV7hhr'], ['2032640064', 'M_GcImKDFdw'], ['5585682587', 'M_GcI62zfbD'], ['3083673764', 'M_GcEcA9bnd']]
        proper_infos = [['1197191492', 'M_GcMn6pFEH']]
        #print proper_infos
        for i in range(len(proper_infos)):
            data = []
            wb = Weibo(int(proper_infos[i][0]), filter)  # instantiate the Weibo class as instance wb
            wb.cookie['Cookie'] = param['cookie']
            #wb.start()
            wb.start_data(proper_infos[i][1])
            # these three values feed the PageRank / influence-factor computation
            print u"用户名:" + wb.username
            # print u"全部微博数:" + str(wb.weibo_num)
            # print u"关注数:" + str(wb.following)
            # print u"粉丝数:" + str(wb.followers)

            print proper_infos[i]
            now_time = datetime.now().strftime('%Y-%m-%d %H:%M')
            temp = [wb.up_num[0], wb.retweet_num[0], wb.comment_num[0], now_time]
            data.append(temp)
            print temp
            write_data(proper_infos[i][0],data,i)
            #write_data(proper_infos[i][0], data, index)
    except Exception, e:
        print "Error: ", e
        traceback.print_exc()
Code example #27
import time
from config_reader import config_reader
param_, model_ = config_reader()

TIME_PRINT = param_['print_tictoc'] == '1'

TIME_PROBE_ID = 0
TIME_STACK = []
TEXT_PADDING = 24
FIRST_STAMP = None
PREV_STAMP = None


class TimeStamp:
    def __init__(self, label):
        self.children = []
        self.elapsed = -1
        self.begun = time.time()
        self.label = label

    def pretty(self, level=0, percentage=100.0):
        tabbing = ''.join(level * ['   '])
        equal_padding = ''.join((TEXT_PADDING - len(self.label)) * [' '])
        result = '| %s|__%s%s: %.2f%% (%.6fs)' % (
            tabbing, self.label, equal_padding, percentage, self.elapsed)
        return result


def time_printout(stamp, level=0):
    accounted_percentage = 0.0
    for child in stamp.children:
Code example #28
    image_path = args.image
    output = args.output
    keras_weights_file = args.model

    tic = time.time()
    print('start processing...')

    # load model

    # authors of original model don't use
    # vgg normalization (subtracting mean) on input images
    model = get_testing_model()
    model.load_weights(keras_weights_file)

    # load config
    params, model_params = config_reader()

    # image to classify
    input_image = cv2.imread('images.jpg')  # B,G,R order

    body_parts, all_peaks, subset, candidate = extract_parts(
        input_image, params, model, model_params)
    canvas, dict, lis1, lis2 = draw(input_image, all_peaks, subset, candidate)

    cv2.imwrite(output, canvas)

    Concatena.salva_csv_dist(lis1, lis2, 'none')

    dataframe = pandas.read_csv("dataset_dist.csv")

    dataset = dataframe.values
Code example #29
import cv2 as cv
import numpy as np
import PIL.Image
import math
import caffe
import time
from config_reader import config_reader
import util
import copy
import matplotlib
import pylab as plt

test_image = 'sample_image/ski.jpg'

oriImg = cv.imread(test_image)
f = plt.imshow(oriImg[:, :, [2, 1, 0]])

param, model = config_reader()
multiplier = [
    x * model['boxsize'] / oriImg.shape[0] for x in param['scale_search']
]

if param['use_gpu']:
    caffe.set_mode_gpu()
    caffe.set_device(0)
else:
    caffe.set_mode_cpu()
net = caffe.Net(model['deployFile'], model['caffemodel'], caffe.TEST)

heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
f, axarr = plt.subplots(1, len(multiplier))
f.set_size_inches((20, 5))
Code example #30
File: demo.py  Project: hdpklm/ewoNeuralBackend
def infer(image_path):

    weights_path = "model/keras/model.h5"

    input_shape = (None, None, 3)

    img_input = Input(shape=input_shape)

    stages = 6
    np_branch1 = 38
    np_branch2 = 19

    img_normalized = Lambda(lambda x: x / 256 - 0.5)(img_input)  # [-0.5, 0.5]

    # VGG
    stage0_out = vgg_block(img_normalized)

    # stage 1
    stage1_branch1_out = stage1_block(stage0_out, np_branch1, 1)
    stage1_branch2_out = stage1_block(stage0_out, np_branch2, 2)
    x = Concatenate()([stage1_branch1_out, stage1_branch2_out, stage0_out])

    # stage t >= 2
    for sn in range(2, stages + 1):
        stageT_branch1_out = stageT_block(x, np_branch1, sn, 1)
        stageT_branch2_out = stageT_block(x, np_branch2, sn, 2)
        if (sn < stages):
            x = Concatenate()(
                [stageT_branch1_out, stageT_branch2_out, stage0_out])

    model = Model(img_input, [stageT_branch1_out, stageT_branch2_out])
    model.load_weights(weights_path)

    #%matplotlib inline
    import cv2
    import matplotlib
    import pylab as plt
    import numpy as np
    import util

    test_image = image_path
    #test_image = 'sample_images/input.jpg'
    oriImg = cv2.imread(test_image)
    #resize the image - haritha
    #oriImg = cv2.resize(oriImg, (0,0), fx=0.1, fy=0.1)

    # B,G,R order
    plt.imshow(oriImg[:, :, [2, 1, 0]])

    param, model_params = config_reader()

    multiplier = [
        x * model_params['boxsize'] / oriImg.shape[0]
        for x in param['scale_search']
    ]

    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
    paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
    # first figure shows padded images
    f, axarr = plt.subplots(1, len(multiplier))
    f.set_size_inches((20, 5))
    # second figure shows heatmaps
    f2, axarr2 = plt.subplots(1, len(multiplier))
    f2.set_size_inches((20, 5))
    # third figure shows PAFs
    f3, axarr3 = plt.subplots(2, len(multiplier))
    f3.set_size_inches((20, 10))

    # updated the range to 1 to increase performance - haritha
    for m in range(1):
        scale = multiplier[m]
        imageToTest = cv2.resize(oriImg, (0, 0),
                                 fx=scale,
                                 fy=scale,
                                 interpolation=cv2.INTER_CUBIC)
        imageToTest_padded, pad = util.padRightDownCorner(
            imageToTest, model_params['stride'], model_params['padValue'])
        axarr[m].imshow(imageToTest_padded[:, :, [2, 1, 0]])
        axarr[m].set_title('Input image: scale %d' % m)

        input_img = np.transpose(
            np.float32(imageToTest_padded[:, :, :, np.newaxis]),
            (3, 0, 1, 2))  # required shape (1, width, height, channels)
        print("Input shape: " + str(input_img.shape))

        #calculate output
        output_blobs = model.predict(input_img)
        print("Output shape (heatmap): " + str(output_blobs[1].shape))

        # extract outputs, resize, and remove padding
        heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
        heatmap = cv2.resize(heatmap, (0, 0),
                             fx=model_params['stride'],
                             fy=model_params['stride'],
                             interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[:imageToTest_padded.shape[0] -
                          pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
                             interpolation=cv2.INTER_CUBIC)
        heatmap_avg = heatmap
        paf = np.squeeze(output_blobs[0])  # output 0 is PAFs
        paf = cv2.resize(paf, (0, 0),
                         fx=model_params['stride'],
                         fy=model_params['stride'],
                         interpolation=cv2.INTER_CUBIC)
        paf = paf[:imageToTest_padded.shape[0] -
                  pad[2], :imageToTest_padded.shape[1] - pad[3], :]
        paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]),
                         interpolation=cv2.INTER_CUBIC)

        ##visualization
        axarr2[m].imshow(oriImg[:, :, [2, 1, 0]])
        ax2 = axarr2[m].imshow(heatmap[:, :, 3], alpha=.5)  # right elbow
        axarr2[m].set_title('Heatmaps (Relb): scale %d' % m)

        axarr3.flat[m].imshow(oriImg[:, :, [2, 1, 0]])
        ax3x = axarr3.flat[m].imshow(paf[:, :, 16], alpha=.5)  # right elbow
        axarr3.flat[m].set_title('PAFs (x comp. of Rwri to Relb): scale %d' %
                                 m)
        axarr3.flat[len(multiplier) + m].imshow(oriImg[:, :, [2, 1, 0]])
        ax3y = axarr3.flat[len(multiplier) + m].imshow(paf[:, :, 17],
                                                       alpha=.5)  # right wrist
        axarr3.flat[len(multiplier) + m].set_title(
            'PAFs (y comp. of Relb to Rwri): scale %d' % m)

        heatmap_avg = heatmap_avg + heatmap / len(multiplier)
        paf_avg = paf_avg + paf / len(multiplier)

    f2.subplots_adjust(right=0.93)
    cbar_ax = f2.add_axes([0.95, 0.15, 0.01, 0.7])
    _ = f2.colorbar(ax2, cax=cbar_ax)

    f3.subplots_adjust(right=0.93)
    cbar_axx = f3.add_axes([0.95, 0.57, 0.01, 0.3])
    _ = f3.colorbar(ax3x, cax=cbar_axx)
    cbar_axy = f3.add_axes([0.95, 0.15, 0.01, 0.3])
    _ = f3.colorbar(ax3y, cax=cbar_axy)

    plt.imshow(oriImg[:, :, [2, 1, 0]])
    plt.imshow(heatmap_avg[:, :, 3], alpha=.5)
    plt.imshow(heatmap_avg[:, :, 4], alpha=.5)
    fig = matplotlib.pyplot.gcf()
    cax = matplotlib.pyplot.gca()
    fig.set_size_inches(20, 20)
    fig.subplots_adjust(right=0.93)
    cbar_ax = fig.add_axes([0.95, 0.15, 0.01, 0.7])
    _ = fig.colorbar(ax2, cax=cbar_ax)

    from numpy import ma
    U = paf_avg[:, :, 16] * -1
    V = paf_avg[:, :, 17]
    X, Y = np.meshgrid(np.arange(U.shape[1]), np.arange(U.shape[0]))
    M = np.zeros(U.shape, dtype='bool')
    M[U**2 + V**2 < 0.5 * 0.5] = True
    U = ma.masked_array(U, mask=M)
    V = ma.masked_array(V, mask=M)

    # 1
    plt.figure()
    plt.imshow(oriImg[:, :, [2, 1, 0]], alpha=.5)
    s = 5
    Q = plt.quiver(X[::s, ::s],
                   Y[::s, ::s],
                   U[::s, ::s],
                   V[::s, ::s],
                   scale=50,
                   headaxislength=4,
                   alpha=.5,
                   width=0.001,
                   color='r')

    fig = matplotlib.pyplot.gcf()
    fig.set_size_inches(20, 20)

    from scipy.ndimage.filters import gaussian_filter
    all_peaks = []
    peak_counter = 0

    for part in range(19 - 1):
        map_ori = heatmap_avg[:, :, part]
        map = gaussian_filter(map_ori, sigma=3)

        map_left = np.zeros(map.shape)
        map_left[1:, :] = map[:-1, :]
        map_right = np.zeros(map.shape)
        map_right[:-1, :] = map[1:, :]
        map_up = np.zeros(map.shape)
        map_up[:, 1:] = map[:, :-1]
        map_down = np.zeros(map.shape)
        map_down[:, :-1] = map[:, 1:]

        peaks_binary = np.logical_and.reduce(
            (map >= map_left, map >= map_right, map >= map_up, map >= map_down,
             map > param['thre1']))
        peaks = list(
            zip(np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0]))  # note reverse
        peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
        id = range(peak_counter, peak_counter + len(peaks))
        peaks_with_score_and_id = [
            peaks_with_score[i] + (id[i], ) for i in range(len(id))
        ]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)

    # find connection in the specified sequence, center 29 is in the position 15
    limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
            [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
            [1,16], [16,18], [3,17], [6,18]]
    # the middle joints heatmap correspondence
    mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], \
            [23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], \
            [55,56], [37,38], [45,46]]

    connection_all = []
    special_k = []
    mid_num = 10

    for k in range(len(mapIdx)):
        score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
        candA = all_peaks[limbSeq[k][0] - 1]
        candB = all_peaks[limbSeq[k][1] - 1]
        nA = len(candA)
        nB = len(candB)
        indexA, indexB = limbSeq[k]
        if (nA != 0 and nB != 0):
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # failure case when 2 body parts overlaps
                    if norm == 0:
                        continue
                    vec = np.divide(vec, norm)

                    startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                                np.linspace(candA[i][1], candB[j][1], num=mid_num)))

                    vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                                    for I in range(len(startend))])
                    vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                                    for I in range(len(startend))])

                    score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(
                        vec_y, vec[1])
                    score_with_dist_prior = sum(
                        score_midpts) / len(score_midpts) + min(
                            0.5 * oriImg.shape[0] / norm - 1, 0)
                    criterion1 = len(
                        np.nonzero(score_midpts > param['thre2'])
                        [0]) > 0.8 * len(score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    if criterion1 and criterion2:
                        connection_candidate.append([
                            i, j, score_with_dist_prior,
                            score_with_dist_prior + candA[i][2] + candB[j][2]
                        ])

            connection_candidate = sorted(connection_candidate,
                                          key=lambda x: x[2],
                                          reverse=True)
            connection = np.zeros((0, 5))
            for c in range(len(connection_candidate)):
                i, j, s = connection_candidate[c][0:3]
                if (i not in connection[:, 3] and j not in connection[:, 4]):
                    connection = np.vstack(
                        [connection, [candA[i][3], candB[j][3], s, i, j]])
                    if (len(connection) >= min(nA, nB)):
                        break

            connection_all.append(connection)
        else:
            special_k.append(k)
            connection_all.append([])

    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    subset = -1 * np.ones((0, 20))
    candidate = np.array([item for sublist in all_peaks for item in sublist])

    for k in range(len(mapIdx)):
        if k not in special_k:
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(limbSeq[k]) - 1

            for i in range(len(connection_all[k])):  #= 1:size(temp,1)
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):  #1:size(subset,1):
                    if subset[j][indexA] == partAs[i] or subset[j][
                            indexB] == partBs[i]:
                        subset_idx[found] = j
                        found += 1

                if found == 1:
                    j = subset_idx[0]
                    if (subset[j][indexB] != partBs[i]):
                        subset[j][indexB] = partBs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partBs[i].astype(int),
                                                   2] + connection_all[k][i][2]
                elif found == 2:  # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    print("found = 2")
                    membership = ((subset[j1] >= 0).astype(int) +
                                  (subset[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:  #merge
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)
                    else:  # as like found == 1
                        subset[j1][indexB] = partBs[i]
                        subset[j1][-1] += 1
                        subset[j1][-2] += candidate[
                            partBs[i].astype(int), 2] + connection_all[k][i][2]

                # if find no partA in the subset, create a new subset
                elif not found and k < 17:
                    row = -1 * np.ones(20)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(
                        candidate[connection_all[k][i, :2].astype(int),
                                  2]) + connection_all[k][i][2]
                    subset = np.vstack([subset, row])

    # delete some rows of subset which has few parts occur
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)

    # visualize
    colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
            [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
            [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]

    # hsv colormap used by the original demo to color each limb (unused here)
    #cmap = matplotlib.cm.get_cmap('hsv')

    canvas = cv2.resize(cv2.imread(test_image), (0, 0), fx=0.1,
                        fy=0.1)  # B,G,R order

    # for i in range(1):
    #     rgba = np.array(cmap(1 - i/18. - 1./36))
    #     rgba[0:3] *= 255

    #show wrist (the 4th index is the right wrist)
    if (len(all_peaks[4]) > 0):
        cv2.circle(canvas, all_peaks[4][0][0:2], 4, colors[1], thickness=-1)
    #print(all_peaks)
    return (all_peaks[3][1])
Code example #31
import scipy
import PIL.Image
import math
import caffe
import time
from config_reader import config_reader
import util
import copy
import matplotlib
import pylab as plt
from scipy.ndimage.filters import gaussian_filter
import scipy
from numpy import ma


param, model = config_reader()


#if param['use_gpu']: 
#    caffe.set_mode_gpu()
#    caffe.set_device(param['GPUdeviceNumber']) # set to your device!
#else:
caffe.set_mode_cpu()
net = caffe.Net(model['deployFile'], model['caffemodel'], caffe.TEST)

# find connection in the specified sequence, center 29 is in the position 15
limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
           [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
           [1,16], [16,18], [3,17], [6,18]]
           
# the middle joints heatmap correspondence
Code example #32
    args = parser.parse_args()
    input_image = args.image
    output = args.output
    keras_weights_file = args.model

    tic = time.time()
    print('start processing...')

    # load model

    # authors of original model don't use
    # vgg normalization (subtracting mean) on input images
    model = get_testing_model()
    model.load_weights(keras_weights_file)

    # load config
    params, model_params = config_reader()

    # generate image with body parts
    canvas = process(input_image, params, model_params)

    toc = time.time()
    print ('processing time is %.5f' % (toc - tic))

    cv2.imwrite(output, canvas)

    cv2.destroyAllWindows()



Code example #33
File: main.py  Project: Thoross/PaPyrus
#!/usr/bin/env python
# coding=UTF-8
'''
    Copyright (c) 2013 Brendan Betts
    Created by: Brendan Betts ([email protected])
    Created on: July 17th, 2013
'''
import config_reader
from wallpaper_utils import loop_tags

if __name__ == "__main__":
    print "Starting PaPyrus."
    reader = config_reader.config_reader("conf\config")
    config_options = reader.parse_config_file()
    loop_tags(config_options)
    print "All done!"
Code example #34
        out5_2 = self.model5_2(out5)
        out6   = torch.cat([out5_1,out5_2,out1],1)         
              
        out6_1 = self.model6_1(out6)
        out6_2 = self.model6_2(out6)
        
        return out6_1,out6_2        


model = pose_model(models)     
model.load_state_dict(torch.load(weight_name))
model.cuda()
model.float()
model.eval()

param_, model_ = config_reader()

 
def handle_one(oriImg):
    
    # for visualize
    canvas = np.copy(oriImg)
    imageToTest = Variable(T.transpose(T.transpose(T.unsqueeze(torch.from_numpy(oriImg).float(),0),2,3),1,2),volatile=True).cuda()
    print oriImg.shape
    scale = model_['boxsize'] / float(oriImg.shape[0])
    print scale
    h = int(oriImg.shape[0]*scale)
    w = int(oriImg.shape[1]*scale)
    pad_h = 0 if (h%model_['stride']==0) else model_['stride'] - (h % model_['stride']) 
    pad_w = 0 if (w%model_['stride']==0) else model_['stride'] - (w % model_['stride'])
    new_h = h+pad_h