Example #1
    def cleanOlds(self, frame):
        track_temp = []
        for trk in self.tracklets:
            # frames elapsed since this tracklet was last observed
            life = frame - sorted(trk.points.keys())[-1]
            if life > self.vanish_time:
                # saving in file
                if len(trk.points) >= self.minlen:
                    name = self.name + '_' + str(trk.id) + self.token
                    line = ''
                    for frame_point in sorted(trk.points):
                        line += '%06d,' % frame_point
                        line += str(trk.points[frame_point][0]) + ' '
                        line += str(trk.points[frame_point][1]) + '\n'

                    u_save2File(name, line)
                    self.files.append(name)

            else:
                track_temp.append(trk)

            # reset the visited flag on each tracklet for Kalman automatic tracking
            trk.visited = False

        self.tracklets = track_temp
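Every example here ultimately persists its text through u_save2File. Its implementation is not shown in these snippets; judging from the call sites (a file name plus the full text payload), a minimal sketch, assuming the helper simply writes the string to disk, could be:

def u_save2File(name, data):
    # Hypothetical sketch: the real helper is not part of these snippets.
    # It is assumed to write the text payload 'data' to the path 'name'.
    with open(name, 'w') as f:
        f.write(data)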
Example #2
def detect_image_file(info, params, model_params):
    # generate image with body parts
    file_name = info['file']
    file_out = info['out']

    oriImg = cv2.imread(file_name)  # B,G,R order
    canvas, ans = process(oriImg, params, model_params)

    cv2.imwrite(file_out, canvas)
    u_save2File(os.path.splitext(file_out)[0] + '.txt', ans)
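A call to detect_image_file might look like the following; the paths are placeholders and params / model_params are assumed to already hold the pose-model configuration used elsewhere in this project:

# Hypothetical usage; paths are placeholders and params / model_params are
# assumed to come from this project's existing configuration step.
info = {
    'file': 'input/person.jpg',       # image read with cv2.imread (B,G,R order)
    'out': 'output/person_pose.png',  # rendered pose canvas written here
}
detect_image_file(info, params, model_params)
# A matching 'output/person_pose.txt' with the keypoint text returned by
# process() is written next to the output image.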
Example #3
    def dump(self):
        for trk in self.tracklets:
            if len(trk.points) >= self.minlen:
                name = self.name + '_' + str(trk.id) + self.token
                line = ''

                for frame_point in sorted(trk.points):
                    line += '%06d,' % frame_point
                    line += str(trk.points[frame_point][0]) + ' '
                    line += str(trk.points[frame_point][1]) + '\n'

                u_save2File(name, line)
                self.files.append(name)
Example #4
def detect_video_file(info, params, model_params):
    file_name = info['file']
    ini = info['ini']
    fin = info['fin']
    out_folder = info['out_folder']
    visual = info['visual']
    prop = info['prop']
    supported = info['supported']
    step = info['step']

    data = {}

    out_prop_folder = out_folder + '/props'
    base = os.path.basename(file_name)
    base = os.path.splitext(base)[0] + '.prop'
    name = out_prop_folder + '/' + base

    if os.path.isfile(name):
        print(' File previously processed ')
        return

    if not os.path.exists(out_folder):
        os.makedirs(out_folder)

    if step == 1:
        video = video_sequence_by1(file_name, ini, fin)
    else:
        video = video_sequence_byn(file_name, step, ini, fin)

    cap = cv2.VideoCapture(file_name)

    ret, frame = cap.read()

    print('Reading :' + file_name)

    # Image / video generation ..................................................
    if visual > 0:
        if visual == 1:
            nframe = 0
            while (ret):
                name = out_folder + '/%05d' % (nframe) + '.png'
                print('Save: ', name)
                params_c = copy.deepcopy(params)
                model_params_c = copy.deepcopy(model_params)
                canvas, ans = process(frame, params_c, model_params_c)
                cv2.imwrite(name, canvas)
                #ret, frame = video.getCurrent()
                ret, frame = cap.read()
                nframe += 1

        # Video Pose image generation...........................................
        # visual = 2
        else:
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            file_n = os.path.basename(file_name)
            file_n = os.path.splitext(file_n)[0] + '.avi'
            name_v = out_folder + '/' + file_n

            fps = int(video.cap.get(cv2.CAP_PROP_FPS))
            out = cv2.VideoWriter(name_v, fourcc, fps,
                                  (int(video.width), int(video.height)))

            pos = 1
            while (ret):
                print(pos)
                params_c = copy.deepcopy(params)
                model_params_c = copy.deepcopy(model_params)
                canvas, ans = process(frame, params_c, model_params_c)
                out.write(canvas)
                #print('frame')
                #print(video.current-1)
                #ret, frame = video.getCurrent()
                ret, frame = cap.read()
                pos += 1

            print('Save video in: ', name_v)
            data['out_video_file'] = name_v
            out.release()

    else:
        # Visual = 0
        # Tracklet file generation.....................................................

        print('Tracklet generation only')
        final_ans = ''

        # if a previous human detection file is available
        if supported:

            frame_list = []
            detection_file = os.path.splitext(file_name)[0] + '.txt'

            # collect the frame indices that already contain detections
            with open(detection_file, 'r') as det_file:
                for line in det_file:
                    if len(line.strip()) < 1:
                        continue
                    split_line = line.split(' ')
                    frame_list.append(int(split_line[0]))

            frame_list = sorted(list(set(frame_list)))

            #print(frame_list)
            ret, frame = video.getCurrent()

            while (ret):
                #print (video.current)
                if (video.current - 1) in frame_list:
                    #name =  out_folder + '/%05d' % (video.current - 1) + '.png'
                    #print('Save: ', name)

                    params_c = copy.deepcopy(params)
                    model_params_c = copy.deepcopy(model_params)
                    canvas, ans = process(frame, params_c, model_params_c)

                    if ans != '\n':
                        final_ans += '%05d-' % (video.current - 1) + ans

                    #cv2.imwrite(name, canvas)

                    #print(final_ans)

                    #cv2.imshow('frame', canvas)
                    #if cv2.waitKey(1) & 0xFF == ord('q'):
                    #    break
                ret, frame = video.getCurrent()

            #print (final_ans)
            base = os.path.basename(file_name)
            base = os.path.splitext(base)[0] + '.txt'
            name = out_folder + '/' + base
            u_save2File(name, final_ans)
            data['tracklet_file'] = name

        # complete analysis of every frame
        else:
            while (ret):

                params_c = copy.deepcopy(params)
                model_params_c = copy.deepcopy(model_params)
                canvas, ans = process(frame, params_c, model_params_c)
                if ans != '\n':
                    final_ans += '%05d-' % (video.current - 1) + ans
                ret, frame = video.getCurrent()

            base = os.path.basename(file_name)
            base = os.path.splitext(base)[0] + '.txt'
            name = out_folder + '/' + base
            u_save2File(name, final_ans)
            data['tracklet_file'] = name

    #............................................................................
    # property flag
    if prop:
        out_prop_folder = out_folder + '/props'
        if not os.path.exists(out_prop_folder):
            os.makedirs(out_prop_folder)

        data_ = {
            "video_file": file_name,
            "ini": video.pos_ini,
            "fin": video.pos_fin,
            "width": video.width,
            "height": video.height
        }

        data.update(data_)
        base = os.path.basename(file_name)
        base = os.path.splitext(base)[0] + '.prop'
        name = out_prop_folder + '/' + base

        #saving in file
        print('Save prop in: ', name)
        with open(name, 'w') as outfile:
            json.dump(data, outfile)
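For reference, the info dictionary that drives detect_video_file is read key by key at the top of the function. An illustrative, hypothetical example of how it might be filled in, with placeholder values only:

# Illustrative only: keys inferred from the lookups at the top of
# detect_video_file; all values are placeholders.
info = {
    'file': 'videos/clip01.avi',  # input video path
    'ini': 0,                     # first frame to process
    'fin': 500,                   # last frame to process
    'out_folder': 'results',      # output directory
    'visual': 0,                  # 0: tracklets only, 1: per-frame PNGs, 2: pose video
    'prop': True,                 # also write a .prop JSON with video metadata
    'supported': False,           # True if a detection .txt exists next to the video
    'step': 1,                    # frame stride (1 = every frame)
}
detect_video_file(info, params, model_params)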