Example #1
    def basicReport(self, branchIdList, developerIdList, dateRange):
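        # DataCollector aggregates per-developer and per-branch git statistics for the given id lists and date range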
        dataCollector = DataCollector(branchIdList, developerIdList, dateRange)
        title = "GIT VIEW TEAM DATA REPORT"
        subtitle = '_'.join([
            dateRange[0].strftime("%Y.%m.%d"),
            dateRange[1].strftime("%Y.%m.%d")
        ])

        self.__cover(title, subtitle)
        self.pdf.showPage()
        self.location = self.startLocation
        self.__singleLayer(
            'Developers Data',
            dataCollector.developerDataBrief()
        )
        self.__doubleLayer(
            'Branches Data',
            dataCollector.branchData()
        )
        self.__tribleLayer(
            'Developers Data in Projects',
            dataCollector.developerDataVerbose()
        )
        self.pdf.showPage()
        self.pdf.save()
        report = self.buffer.getvalue()
        self.buffer.close()
        return report
Example #2
    def tagReport(self, branchIdList, developerIdList, tagRange):
        dateRange = (
            min(tagRange[0].submit_date, tagRange[1].submit_date),
            max(tagRange[0].submit_date, tagRange[1].submit_date)
        )
        dataCollector = DataCollector(branchIdList, developerIdList, dateRange)
        title = "GIT VIEW TAG DATA REPORT"
        subtitle = '%s %s to %s %s' % (
            tagRange[0].project.name,
            tagRange[0].name,
            tagRange[1].project.name,
            tagRange[1].name
        )
        self.__cover(title, subtitle)
        self.pdf.showPage()
        self.location = self.startLocation
        self.__singleLayer(
            'Developers Data',
            dataCollector.developerDataBrief()
        )
        self.__doubleLayer(
            'Branches Data',
            dataCollector.branchData()
        )
        self.__tribleLayer(
            'Developers Data in Projects',
            dataCollector.developerDataVerbose()
        )
        self.pdf.showPage()
        self.pdf.save()
        report = self.buffer.getvalue()
        self.buffer.close()
        return report
Example #3
    def __init__(self,dq,state,freq):
        self._running = True
        self._display_surf = None
        self._image_surf = None
        self._apple_surf = None

        self.game = Game()
        self.player = Player(3,self.windowDimX, self.windowDimY)
        self.windowWidth = self.windowDimX*self.player.step
        self.windowHeight = self.windowDimY*self.player.step 
        self.toolbar = Toolbar(self.toolbarWidth, self.windowWidth, self.windowHeight)
        self.apple = Apple(randint(0,self.windowDimX-1), randint(0,self.windowDimY-1))
        self.dataCollect = DataCollector()
        self.displayq=dq
        self.state_size = state
        self.frequency = freq
Example #4
# "plotter" below is assumed to be matplotlib.pyplot; the import is missing from this excerpt
import matplotlib.pyplot as plotter

class GraphPlotter:
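    # the class-level DataCollector below supplies the series plotted by each method (n, avg_*_messages, finger_table_hitratio)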

    dc = DataCollector()
    #PLOT FOR AVERAGE NUMBER OF MESSAGES IN NODE INSERTION VS NUMBER OF NODES
    def plotForInserts(self):
        plotter.plot(self.dc.n, self.dc.avg_insert_messages, color='b', linewidth=2.0)
        plotter.ylabel("Average Number of Messages(Node Arrival)")
        plotter.xlabel("Number of Nodes(N)")
        plotter.show()

    #PLOT FOR AVERAGE NUMBER OF MESSAGES IN NODE DELETION VS NUMBER OF NODES
    def plotForDeletes(self):
        plotter.plot(self.dc.n, self.dc.avg_delete_messages, color='r', linewidth=2.0)
        plotter.ylabel("Average Number of Messages(Node Departure)")
        plotter.xlabel("Number of Nodes(N)")
        plotter.show()

    #PLOT FOR AVERAGE NUMBER OF MESSAGES IN KEY LOOKUP VS NUMBER OF NODES
    def plotForLookups(self):
        plotter.plot(self.dc.n, self.dc.avg_lookup_messages, color='g', linewidth=2.0)
        plotter.ylabel("Average Number of Messages(Key Lookup)")
        plotter.xlabel("Number of Nodes(N)")
        plotter.show()

    #PLOT FOR GLOBAL FINGER TABLE HIT RATIO
    def plotForFingerTableEfficacy(self):
        plotter.plot(self.dc.n, self.dc.finger_table_hitratio, color='g', linewidth=2.0)
        plotter.ylabel("Finger Table Hit Ratio")
        plotter.xlabel("Number of Nodes(N)")
        plotter.show()
Example #5
def fetchAndSaveData():
    global time_last
    try:
        datacollector = DataCollector()
        time_now = getTime()
        data = datacollector.getCarsData()
        oldDataFile = getFileName(time_last)
        print time_last
        if(os.path.exists(oldDataFile)):
            missingData = datacollector.getMissingCars(data,oldDataFile)
            for missingEntry in missingData:
                try:   
                    db.add_entry(Cars(missingEntry, time_now))
                except:
                    print 'Booking========================================================================================='
                    traceback.print_exc()
                    print missingEntry
            releasedData = datacollector.getReleasedCars(data,oldDataFile)
            for releasedEntry in releasedData:
                try:
                    db.updateEntry(releasedEntry, time_now)
                except:
                    print '========================================================================================='
                    print("Unexpected error:", sys.exc_info()[0])
                    traceback.print_exc()
                    print releasedEntry
        datacollector.writeToFile(data, getFileName(time_now))
        time_last = time_now
    except:
        print 'exception', sys.exc_info()[0]
        traceback.print_exc()
Example #6
    def startTask(self, cond, driving='?', blockNum=0):
        self.condition = cond
        self.fullCondition = driving + '_' + cond
        self.run += 1

        datacond = '_' + self.roadCond + '_tablet'

        if self.doPractice:
            datacond = '_prac' + datacond

        self.db = DataCollector(
            'Tablet DB',
            'data/' + globals.participant + datacond + '_tablet.dat', [
                'pp', 'cond', 'correct', 'answer', 'conversation', 'question',
                'condtime', 'time'
            ])
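        # DataCollector here acts as a tabular logger: one .dat file per participant/condition with the listed columns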
        self.db.open()

        self.started = True
        self.pickConversation()

        self.startNextClip()
Example #7
    def __init__(self, root, datamodel, config):
        """Initiate the program
        
        Arguments
        root -- the Tk instance of the application
        datamodel -- the data model instance to show
        config -- ConfigParser containing application settings
        
        """
        self._datamodel = datamodel

        #Create string variables        
        self._lblTitle = StringVar()
        self._lblAuthor = StringVar()
        self._activeInPreview = []
        self._activeInPlay = []
        self._datacollector = DataCollector()
        self._userstop = False
        
        #Initiate GUI and data
        self._setupDisplay(root, config)
        self._initData(config)
        
        #Create transparent buttons
        self._setupCategories(config)
        self._setupControls(root, config)

        #Create player
        self._playerFrame = PlayerFrame(root, self._settings.playersize, self._playbackStopped)
        self._playerFrame.setClbClicked(self._clbClicked)
        self._playerwnd = self._canvas.create_window(self._settings.playerpos[0], 
                                                     self._settings.playerpos[1], 
                                                     window = self._playerFrame, 
                                                     anchor = NW)

        #Create preview
        self._previewFrame = PreviewFrame(root, self._datacollector, self._canvas, self._settings)
        self._previewFrame.setClbActivate(self._clbClicked)
        self._previewFrame.previewsubset(self._currentMovies)

        self._updateBrowseButtons()
        
        self._setPreviewMode()

        self._dml = DataModelLoader(root, self._datamodel)
        #after_idle does not work here; 100 ms is an arbitrarily chosen delay time
        root.after(100, self._dml.load)
Example #8
    def startTask(self, cond, driving='?', blockNum=0):
        self.condition = cond
        self.fullCondition = driving+'_'+cond
        self.run += 1

        datacond = '_' + self.roadCond + '_tablet'

        if self.doPractice:
            datacond = '_prac' + datacond

        self.db = DataCollector('Tablet DB', 'data/'+globals.participant+datacond+'_tablet.dat', ['pp', 'cond', 'correct',
                                                                           'answer', 'conversation', 'question',
                                                                           'condtime', 'time'])
        self.db.open()

        self.started = True
        self.pickConversation()

        self.startNextClip()
Example #9
    def __init__(self,
                 cfg,
                 is_video=True,
                 multi_camera=False,
                 enable_attr=False,
                 enable_action=False,
                 device='CPU',
                 run_mode='paddle',
                 trt_min_shape=1,
                 trt_max_shape=1280,
                 trt_opt_shape=640,
                 trt_calib_mode=False,
                 cpu_threads=1,
                 enable_mkldnn=False,
                 output_dir='output',
                 draw_center_traj=False,
                 secs_interval=10,
                 do_entrance_counting=False):

        if enable_attr and not cfg.get('ATTR', False):
            raise ValueError(
                'enable_attr is set to True, please set ATTR in config file')
        if enable_action and (not cfg.get('ACTION', False)
                              or not cfg.get('KPT', False)):
            raise ValueError(
                'enable_action is set to True, please set KPT and ACTION in config file'
            )

        self.with_attr = cfg.get('ATTR', False) and enable_attr
        self.with_action = cfg.get('ACTION', False) and enable_action
        self.with_mtmct = cfg.get('REID', False) and multi_camera
        if self.with_attr:
            print('Attribute Recognition enabled')
        if self.with_action:
            print('Action Recognition enabled')
        if multi_camera:
            if not self.with_mtmct:
                print(
                    'Warning!!! MTMCT enabled, but cannot find REID config in [infer_cfg.yml], please check!'
                )
            else:
                print("MTMCT enabled")

        self.is_video = is_video
        self.multi_camera = multi_camera
        self.cfg = cfg
        self.output_dir = output_dir
        self.draw_center_traj = draw_center_traj
        self.secs_interval = secs_interval
        self.do_entrance_counting = do_entrance_counting

        self.warmup_frame = self.cfg['warmup_frame']
        self.pipeline_res = Result()
        self.pipe_timer = PipeTimer()
        self.file_name = None
        self.collector = DataCollector()

        if not is_video:
            det_cfg = self.cfg['DET']
            model_dir = det_cfg['model_dir']
            batch_size = det_cfg['batch_size']
            self.det_predictor = Detector(model_dir, device, run_mode,
                                          batch_size, trt_min_shape,
                                          trt_max_shape, trt_opt_shape,
                                          trt_calib_mode, cpu_threads,
                                          enable_mkldnn)
            if self.with_attr:
                attr_cfg = self.cfg['ATTR']
                model_dir = attr_cfg['model_dir']
                batch_size = attr_cfg['batch_size']
                self.attr_predictor = AttrDetector(model_dir, device, run_mode,
                                                   batch_size, trt_min_shape,
                                                   trt_max_shape,
                                                   trt_opt_shape,
                                                   trt_calib_mode, cpu_threads,
                                                   enable_mkldnn)

        else:
            mot_cfg = self.cfg['MOT']
            model_dir = mot_cfg['model_dir']
            tracker_config = mot_cfg['tracker_config']
            batch_size = mot_cfg['batch_size']
            self.mot_predictor = SDE_Detector(
                model_dir,
                tracker_config,
                device,
                run_mode,
                batch_size,
                trt_min_shape,
                trt_max_shape,
                trt_opt_shape,
                trt_calib_mode,
                cpu_threads,
                enable_mkldnn,
                draw_center_traj=draw_center_traj,
                secs_interval=secs_interval,
                do_entrance_counting=do_entrance_counting)
            if self.with_attr:
                attr_cfg = self.cfg['ATTR']
                model_dir = attr_cfg['model_dir']
                batch_size = attr_cfg['batch_size']
                self.attr_predictor = AttrDetector(model_dir, device, run_mode,
                                                   batch_size, trt_min_shape,
                                                   trt_max_shape,
                                                   trt_opt_shape,
                                                   trt_calib_mode, cpu_threads,
                                                   enable_mkldnn)
            if self.with_action:
                kpt_cfg = self.cfg['KPT']
                kpt_model_dir = kpt_cfg['model_dir']
                kpt_batch_size = kpt_cfg['batch_size']
                action_cfg = self.cfg['ACTION']
                action_model_dir = action_cfg['model_dir']
                action_batch_size = action_cfg['batch_size']
                action_frames = action_cfg['max_frames']
                display_frames = action_cfg['display_frames']
                self.coord_size = action_cfg['coord_size']

                self.kpt_predictor = KeyPointDetector(kpt_model_dir,
                                                      device,
                                                      run_mode,
                                                      kpt_batch_size,
                                                      trt_min_shape,
                                                      trt_max_shape,
                                                      trt_opt_shape,
                                                      trt_calib_mode,
                                                      cpu_threads,
                                                      enable_mkldnn,
                                                      use_dark=False)
                self.kpt_buff = KeyPointBuff(action_frames)

                self.action_predictor = ActionRecognizer(
                    action_model_dir,
                    device,
                    run_mode,
                    action_batch_size,
                    trt_min_shape,
                    trt_max_shape,
                    trt_opt_shape,
                    trt_calib_mode,
                    cpu_threads,
                    enable_mkldnn,
                    window_size=action_frames)

                self.action_visual_helper = ActionVisualHelper(display_frames)

        if self.with_mtmct:
            reid_cfg = self.cfg['REID']
            model_dir = reid_cfg['model_dir']
            batch_size = reid_cfg['batch_size']
            self.reid_predictor = ReID(model_dir, device, run_mode, batch_size,
                                       trt_min_shape, trt_max_shape,
                                       trt_opt_shape, trt_calib_mode,
                                       cpu_threads, enable_mkldnn)
Example #10
class PipePredictor(object):
    """
    Predictor for a single camera.

    The pipeline for image input:

        1. Detection
        2. Detection -> Attribute

    The pipeline for video input:

        1. Tracking
        2. Tracking -> Attribute
        3. Tracking -> KeyPoint -> Action Recognition

    Args:
        cfg (dict): config of the models in the pipeline
        is_video (bool): whether the input is video, default is True
        multi_camera (bool): whether to use multiple cameras in the pipeline,
            default is False
        camera_id (int): the device id of the camera to predict, default is -1
        enable_attr (bool): whether to use attribute recognition, default is False
        enable_action (bool): whether to use action recognition, default is False
        device (string): the device used for prediction, options are CPU/GPU/XPU,
            default is CPU
        run_mode (string): the prediction mode, options are
            paddle/trt_fp32/trt_fp16, default is paddle
        trt_min_shape (int): min shape for dynamic shape in trt, default is 1
        trt_max_shape (int): max shape for dynamic shape in trt, default is 1280
        trt_opt_shape (int): opt shape for dynamic shape in trt, default is 640
        trt_calib_mode (bool): if the model is produced by TRT offline quantization
            calibration, trt_calib_mode needs to be set to True, default is False
        cpu_threads (int): number of cpu threads, default is 1
        enable_mkldnn (bool): whether to enable MKLDNN, default is False
        output_dir (string): the path of the output, default is 'output'
        draw_center_traj (bool): whether to draw the trajectory of box centers,
            default is False
        secs_interval (int): the interval in seconds for counting after tracking,
            default is 10
        do_entrance_counting (bool): whether to count identifiers entering or
            leaving through the entrance line; default is False, and only
            single-class counting is supported in MOT
    """
    def __init__(self,
                 cfg,
                 is_video=True,
                 multi_camera=False,
                 enable_attr=False,
                 enable_action=False,
                 device='CPU',
                 run_mode='paddle',
                 trt_min_shape=1,
                 trt_max_shape=1280,
                 trt_opt_shape=640,
                 trt_calib_mode=False,
                 cpu_threads=1,
                 enable_mkldnn=False,
                 output_dir='output',
                 draw_center_traj=False,
                 secs_interval=10,
                 do_entrance_counting=False):

        if enable_attr and not cfg.get('ATTR', False):
            raise ValueError(
                'enable_attr is set to True, please set ATTR in config file')
        if enable_action and (not cfg.get('ACTION', False)
                              or not cfg.get('KPT', False)):
            raise ValueError(
                'enable_action is set to True, please set KPT and ACTION in config file'
            )

        self.with_attr = cfg.get('ATTR', False) and enable_attr
        self.with_action = cfg.get('ACTION', False) and enable_action
        self.with_mtmct = cfg.get('REID', False) and multi_camera
        if self.with_attr:
            print('Attribute Recognition enabled')
        if self.with_action:
            print('Action Recognition enabled')
        if multi_camera:
            if not self.with_mtmct:
                print(
                    'Warning!!! MTMCT enabled, but cannot find REID config in [infer_cfg.yml], please check!'
                )
            else:
                print("MTMCT enabled")

        self.is_video = is_video
        self.multi_camera = multi_camera
        self.cfg = cfg
        self.output_dir = output_dir
        self.draw_center_traj = draw_center_traj
        self.secs_interval = secs_interval
        self.do_entrance_counting = do_entrance_counting

        self.warmup_frame = self.cfg['warmup_frame']
        self.pipeline_res = Result()
        self.pipe_timer = PipeTimer()
        self.file_name = None
        self.collector = DataCollector()

        if not is_video:
            det_cfg = self.cfg['DET']
            model_dir = det_cfg['model_dir']
            batch_size = det_cfg['batch_size']
            self.det_predictor = Detector(model_dir, device, run_mode,
                                          batch_size, trt_min_shape,
                                          trt_max_shape, trt_opt_shape,
                                          trt_calib_mode, cpu_threads,
                                          enable_mkldnn)
            if self.with_attr:
                attr_cfg = self.cfg['ATTR']
                model_dir = attr_cfg['model_dir']
                batch_size = attr_cfg['batch_size']
                self.attr_predictor = AttrDetector(model_dir, device, run_mode,
                                                   batch_size, trt_min_shape,
                                                   trt_max_shape,
                                                   trt_opt_shape,
                                                   trt_calib_mode, cpu_threads,
                                                   enable_mkldnn)

        else:
            mot_cfg = self.cfg['MOT']
            model_dir = mot_cfg['model_dir']
            tracker_config = mot_cfg['tracker_config']
            batch_size = mot_cfg['batch_size']
            self.mot_predictor = SDE_Detector(
                model_dir,
                tracker_config,
                device,
                run_mode,
                batch_size,
                trt_min_shape,
                trt_max_shape,
                trt_opt_shape,
                trt_calib_mode,
                cpu_threads,
                enable_mkldnn,
                draw_center_traj=draw_center_traj,
                secs_interval=secs_interval,
                do_entrance_counting=do_entrance_counting)
            if self.with_attr:
                attr_cfg = self.cfg['ATTR']
                model_dir = attr_cfg['model_dir']
                batch_size = attr_cfg['batch_size']
                self.attr_predictor = AttrDetector(model_dir, device, run_mode,
                                                   batch_size, trt_min_shape,
                                                   trt_max_shape,
                                                   trt_opt_shape,
                                                   trt_calib_mode, cpu_threads,
                                                   enable_mkldnn)
            if self.with_action:
                kpt_cfg = self.cfg['KPT']
                kpt_model_dir = kpt_cfg['model_dir']
                kpt_batch_size = kpt_cfg['batch_size']
                action_cfg = self.cfg['ACTION']
                action_model_dir = action_cfg['model_dir']
                action_batch_size = action_cfg['batch_size']
                action_frames = action_cfg['max_frames']
                display_frames = action_cfg['display_frames']
                self.coord_size = action_cfg['coord_size']

                self.kpt_predictor = KeyPointDetector(kpt_model_dir,
                                                      device,
                                                      run_mode,
                                                      kpt_batch_size,
                                                      trt_min_shape,
                                                      trt_max_shape,
                                                      trt_opt_shape,
                                                      trt_calib_mode,
                                                      cpu_threads,
                                                      enable_mkldnn,
                                                      use_dark=False)
                self.kpt_buff = KeyPointBuff(action_frames)

                self.action_predictor = ActionRecognizer(
                    action_model_dir,
                    device,
                    run_mode,
                    action_batch_size,
                    trt_min_shape,
                    trt_max_shape,
                    trt_opt_shape,
                    trt_calib_mode,
                    cpu_threads,
                    enable_mkldnn,
                    window_size=action_frames)

                self.action_visual_helper = ActionVisualHelper(display_frames)

        if self.with_mtmct:
            reid_cfg = self.cfg['REID']
            model_dir = reid_cfg['model_dir']
            batch_size = reid_cfg['batch_size']
            self.reid_predictor = ReID(model_dir, device, run_mode, batch_size,
                                       trt_min_shape, trt_max_shape,
                                       trt_opt_shape, trt_calib_mode,
                                       cpu_threads, enable_mkldnn)

    def set_file_name(self, path):
        if path is not None:
            self.file_name = os.path.split(path)[-1]
        else:
            # use camera id
            self.file_name = None

    def get_result(self):
        return self.collector.get_res()

    def run(self, input):
        if self.is_video:
            self.predict_video(input)
        else:
            self.predict_image(input)
        self.pipe_timer.info()

    def predict_image(self, input):
        # det
        # det -> attr
        batch_loop_cnt = math.ceil(
            float(len(input)) / self.det_predictor.batch_size)
        for i in range(batch_loop_cnt):
            start_index = i * self.det_predictor.batch_size
            end_index = min((i + 1) * self.det_predictor.batch_size,
                            len(input))
            batch_file = input[start_index:end_index]
            batch_input = [decode_image(f, {})[0] for f in batch_file]

            if i > self.warmup_frame:
                self.pipe_timer.total_time.start()
                self.pipe_timer.module_time['det'].start()
            # det output format: class, score, xmin, ymin, xmax, ymax
            det_res = self.det_predictor.predict_image(batch_input,
                                                       visual=False)
            det_res = self.det_predictor.filter_box(det_res,
                                                    self.cfg['crop_thresh'])
            if i > self.warmup_frame:
                self.pipe_timer.module_time['det'].end()
            self.pipeline_res.update(det_res, 'det')

            if self.with_attr:
                crop_inputs = crop_image_with_det(batch_input, det_res)
                attr_res_list = []

                if i > self.warmup_frame:
                    self.pipe_timer.module_time['attr'].start()

                for crop_input in crop_inputs:
                    attr_res = self.attr_predictor.predict_image(crop_input,
                                                                 visual=False)
                    attr_res_list.extend(attr_res['output'])

                if i > self.warmup_frame:
                    self.pipe_timer.module_time['attr'].end()

                attr_res = {'output': attr_res_list}
                self.pipeline_res.update(attr_res, 'attr')

            self.pipe_timer.img_num += len(batch_input)
            if i > self.warmup_frame:
                self.pipe_timer.total_time.end()

            if self.cfg['visual']:
                self.visualize_image(batch_file, batch_input,
                                     self.pipeline_res)

    def predict_video(self, video_file):
        # mot
        # mot -> attr
        # mot -> pose -> action
        capture = cv2.VideoCapture(video_file)
        video_out_name = 'output.mp4' if self.file_name is None else self.file_name

        # Get Video info : resolution, fps, frame count
        width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = int(capture.get(cv2.CAP_PROP_FPS))
        frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        print("video fps: %d, frame_count: %d" % (fps, frame_count))

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        out_path = os.path.join(self.output_dir, video_out_name)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
        frame_id = 0

        entrance, records, center_traj = None, None, None
        if self.draw_center_traj:
            center_traj = [{}]
        id_set = set()
        interval_id_set = set()
        in_id_list = list()
        out_id_list = list()
        prev_center = dict()
        records = list()
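        # entrance counting line: a horizontal segment [x1, y1, x2, y2] across the middle of the frame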
        entrance = [0, height / 2., width, height / 2.]
        video_fps = fps

        while (1):
            if frame_id % 10 == 0:
                print('frame id: ', frame_id)
            ret, frame = capture.read()
            if not ret:
                break

            if frame_id > self.warmup_frame:
                self.pipe_timer.total_time.start()
                self.pipe_timer.module_time['mot'].start()
            res = self.mot_predictor.predict_image([copy.deepcopy(frame)],
                                                   visual=False)

            if frame_id > self.warmup_frame:
                self.pipe_timer.module_time['mot'].end()

            # mot output format: id, class, score, xmin, ymin, xmax, ymax
            mot_res = parse_mot_res(res)

            # flow_statistic only support single class MOT
            boxes, scores, ids = res[0]  # batch size = 1 in MOT
            mot_result = (frame_id + 1, boxes[0], scores[0], ids[0]
                          )  # single class
            statistic = flow_statistic(mot_result, self.secs_interval,
                                       self.do_entrance_counting, video_fps,
                                       entrance, id_set, interval_id_set,
                                       in_id_list, out_id_list, prev_center,
                                       records)
            records = statistic['records']

            # nothing detected
            if len(mot_res['boxes']) == 0:
                frame_id += 1
                if frame_id > self.warmup_frame:
                    self.pipe_timer.img_num += 1
                    self.pipe_timer.total_time.end()
                if self.cfg['visual']:
                    _, _, fps = self.pipe_timer.get_total_time()
                    im = self.visualize_video(frame, mot_res, frame_id,
                                              fps)  # visualize
                    writer.write(im)
                    if self.file_name is None:  # use camera_id
                        cv2.imshow('PPHuman', im)
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break

                continue

            self.pipeline_res.update(mot_res, 'mot')
            if self.with_attr or self.with_action:
                crop_input, new_bboxes, ori_bboxes = crop_image_with_mot(
                    frame, mot_res)

            if self.with_attr:
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['attr'].start()
                attr_res = self.attr_predictor.predict_image(crop_input,
                                                             visual=False)
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['attr'].end()
                self.pipeline_res.update(attr_res, 'attr')

            if self.with_action:
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['kpt'].start()
                kpt_pred = self.kpt_predictor.predict_image(crop_input,
                                                            visual=False)
                keypoint_vector, score_vector = translate_to_ori_images(
                    kpt_pred, np.array(new_bboxes))
                kpt_res = {}
                kpt_res['keypoint'] = [
                    keypoint_vector.tolist(),
                    score_vector.tolist()
                ] if len(keypoint_vector) > 0 else [[], []]
                kpt_res['bbox'] = ori_bboxes
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['kpt'].end()

                self.pipeline_res.update(kpt_res, 'kpt')

                self.kpt_buff.update(kpt_res, mot_res)  # collect kpt output
                state = self.kpt_buff.get_state(
                )  # whether frame num is enough or lost tracker

                action_res = {}
                if state:
                    if frame_id > self.warmup_frame:
                        self.pipe_timer.module_time['action'].start()
                    collected_keypoint = self.kpt_buff.get_collected_keypoint(
                    )  # reorganize kpt output with ID
                    action_input = parse_mot_keypoint(collected_keypoint,
                                                      self.coord_size)
                    action_res = self.action_predictor.predict_skeleton_with_mot(
                        action_input)
                    if frame_id > self.warmup_frame:
                        self.pipe_timer.module_time['action'].end()
                    self.pipeline_res.update(action_res, 'action')

                if self.cfg['visual']:
                    self.action_visual_helper.update(action_res)

            if self.with_mtmct and frame_id % 10 == 0:
                crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot(
                    frame, mot_res)
                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['reid'].start()
                reid_res = self.reid_predictor.predict_batch(crop_input)

                if frame_id > self.warmup_frame:
                    self.pipe_timer.module_time['reid'].end()

                reid_res_dict = {
                    'features': reid_res,
                    "qualities": img_qualities,
                    "rects": rects
                }
                self.pipeline_res.update(reid_res_dict, 'reid')
            else:
                self.pipeline_res.clear('reid')

            self.collector.append(frame_id, self.pipeline_res)

            if frame_id > self.warmup_frame:
                self.pipe_timer.img_num += 1
                self.pipe_timer.total_time.end()
            frame_id += 1

            if self.cfg['visual']:
                _, _, fps = self.pipe_timer.get_total_time()
                im = self.visualize_video(frame, self.pipeline_res, frame_id,
                                          fps, entrance, records,
                                          center_traj)  # visualize
                writer.write(im)
                if self.file_name is None:  # use camera_id
                    cv2.imshow('PPHuman', im)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

        writer.release()
        print('save result to {}'.format(out_path))

    def visualize_video(self,
                        image,
                        result,
                        frame_id,
                        fps,
                        entrance=None,
                        records=None,
                        center_traj=None):
        mot_res = copy.deepcopy(result.get('mot'))
        if mot_res is not None:
            ids = mot_res['boxes'][:, 0]
            scores = mot_res['boxes'][:, 2]
            boxes = mot_res['boxes'][:, 3:]
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
        else:
            boxes = np.zeros([0, 4])
            ids = np.zeros([0])
            scores = np.zeros([0])

        # single class, but results still need to be defaultdict type for plotting
        num_classes = 1
        online_tlwhs = defaultdict(list)
        online_scores = defaultdict(list)
        online_ids = defaultdict(list)
        online_tlwhs[0] = boxes
        online_scores[0] = scores
        online_ids[0] = ids

        image = plot_tracking_dict(
            image,
            num_classes,
            online_tlwhs,
            online_ids,
            online_scores,
            frame_id=frame_id,
            fps=fps,
            do_entrance_counting=self.do_entrance_counting,
            entrance=entrance,
            records=records,
            center_traj=center_traj)

        attr_res = result.get('attr')
        if attr_res is not None:
            boxes = mot_res['boxes'][:, 1:]
            attr_res = attr_res['output']
            image = visualize_attr(image, attr_res, boxes)
            image = np.array(image)

        kpt_res = result.get('kpt')
        if kpt_res is not None:
            image = visualize_pose(image,
                                   kpt_res,
                                   visual_thresh=self.cfg['kpt_thresh'],
                                   returnimg=True)

        action_res = result.get('action')
        if action_res is not None:
            image = visualize_action(image, mot_res['boxes'],
                                     self.action_visual_helper, "Falling")

        return image

    def visualize_image(self, im_files, images, result):
        start_idx, boxes_num_i = 0, 0
        det_res = result.get('det')
        attr_res = result.get('attr')
        for i, (im_file, im) in enumerate(zip(im_files, images)):
            if det_res is not None:
                det_res_i = {}
                boxes_num_i = det_res['boxes_num'][i]
                det_res_i['boxes'] = det_res['boxes'][start_idx:start_idx +
                                                      boxes_num_i, :]
                im = visualize_box_mask(im,
                                        det_res_i,
                                        labels=['person'],
                                        threshold=self.cfg['crop_thresh'])
                im = np.ascontiguousarray(np.copy(im))
                im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
            if attr_res is not None:
                attr_res_i = attr_res['output'][start_idx:start_idx +
                                                boxes_num_i]
                im = visualize_attr(im, attr_res_i, det_res_i['boxes'])
            img_name = os.path.split(im_file)[-1]
            if not os.path.exists(self.output_dir):
                os.makedirs(self.output_dir)
            out_path = os.path.join(self.output_dir, img_name)
            cv2.imwrite(out_path, im)
            print("save result to: " + out_path)
            start_idx += boxes_num_i
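
A minimal usage sketch for the PipePredictor above (not part of the original project): it assumes the pipeline config can be loaded from an infer_cfg.yml file with PyYAML and that the model directories and keys ('MOT', 'warmup_frame', 'visual', ...) it references are present; the file names are placeholders.

import yaml

with open('infer_cfg.yml') as f:      # hypothetical path to the pipeline config
    cfg = yaml.safe_load(f)

predictor = PipePredictor(cfg, is_video=True, device='GPU', run_mode='paddle')
predictor.set_file_name('demo.mp4')   # used to name the output video
predictor.run('demo.mp4')             # tracking (plus attr/kpt/action when enabled)
results = predictor.get_result()      # results accumulated by the internal DataCollector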
Example #11
# #########################################
# Main Simulator Module        #
# #########################################

from node import Node
from idGenerator import IdGenerator
from operation import Operation
from stats import Stats
from datacollector import DataCollector
from graphplotter import GraphPlotter
import property  # assumed name of the project module defining the INITID and RANGEID constants used below


nodeIdGenerator = IdGenerator()
nodeIdGenerator.lastAllottedId = property.INITID
nodeIdGenerator.randomRange = property.RANGEID

data_collector = DataCollector()

maxId = 0

########## NODES IN RING OVERLAY #######################
nodes = []
########################################################

############## OPERATIONS AND EXPERIMENTATION ##########
def add_node(newNode, existingnode):
    newNode.join(existingnode)
    newNode.update_others(Operation.INSERT)

def print_ring(start):
    trav = start
    trav.node_info()
Example #12
class App:
    windowDimY = 14
    windowDimX = 18
    windowWidth = 0
    windowHeight = 0
    toolbarWidth = 200
    player = 0
    apple = 0
    toolbar = 0
    dataCollect = 0
    displayq = False
 
    def __init__(self,dq,state,freq):
        self._running = True
        self._display_surf = None
        self._image_surf = None
        self._apple_surf = None

        self.game = Game()
        self.player = Player(3,self.windowDimX, self.windowDimY)
        self.windowWidth = self.windowDimX*self.player.step
        self.windowHeight = self.windowDimY*self.player.step 
        self.toolbar = Toolbar(self.toolbarWidth, self.windowWidth, self.windowHeight)
        self.apple = Apple(randint(0,self.windowDimX-1), randint(0,self.windowDimY-1))
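        # DataCollector records per-step snake/apple positions and per-episode stats for saving in on_execute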
        self.dataCollect = DataCollector()
        self.displayq=dq
        self.state_size = state
        self.frequency = freq

    def on_init(self):
        pygame.init()
        if(self.displayq):
            self._display_surf = pygame.display.set_mode((self.windowWidth+self.toolbarWidth,self.windowHeight), pygame.HWSURFACE)
            pygame.display.set_caption('Pygame Snake game!')
            self._image_surf = pygame.image.load("images/game_objects/smake.png").convert()
            self._apple_surf = pygame.image.load("images/game_objects/smapple.png").convert()
            self.toolbar.load_images()
        self._running = True
        # self.savepath = "frames/"+time.strftime("%Y%m%d-%H%M%S")
        # os.mkdir(self.savepath)
 
    def on_event(self, event):
        if event.type == QUIT:
            self._running = False
 
    def on_loop(self): 
        self.player.update()

        # does snake collide with itself?
        for i in range(2,self.player.length):
            if self.game.isCollision(self.player.x[0],self.player.y[0],self.player.x[i], self.player.y[i],self.player.step-1):
                print("You lose! Collision with yourself: ")
                print("x[0] (" + str(self.player.x[0]) + "," + str(self.player.y[0]) + ")")
                print("x[" + str(i) + "] (" + str(self.player.x[i]) + "," + str(self.player.y[i]) + ")")
                self.player.crashed = True
                
        # does snake eat apple?
        for i in range(0,self.player.length):
            if self.game.isCollision(self.apple.x,self.apple.y,self.player.x[i], self.player.y[i],self.player.step-1):
                self.apple.x = randint(0,self.windowDimX-1) * self.player.step
                self.apple.y = randint(0,self.windowDimY-1) * self.player.step
                print("apple x=",self.apple.x,"apple y=",self.apple.y)
                self.player.eatenApple = True
 
        #does snake collide with wall?
        if(self.player.x[0]<0 or self.player.x[0]>=self.windowWidth or self.player.y[0]<0 or self.player.y[0]>=self.windowHeight):
            print("You lose! Collision with wall: ")
            print("x[0] (" + str(self.player.x[0]) + "," + str(self.player.y[0]) + ")")
            self.player.crashed = True
        
        pass

    def on_render(self, state):
        self._display_surf.fill((0,0,0))
        self.player.draw(self._display_surf, self._image_surf)
        self.apple.draw(self._display_surf, self._apple_surf)
        self.toolbar.draw(self._display_surf, self.player.direction, state)
        pygame.display.flip()
 
    def on_cleanup(self):
        pygame.quit()

    # initializing agent before the 1st move
    def init_agent(self, agent):
        state_init1 = agent.get_state(self, self.player, self.apple) #first state after random placement
        #first action
        
    def reset_player(self):
        self.player = Player(3,self.windowDimX, self.windowDimY)
        #print(self.player.x)
        #print(self.player.y)

    def on_execute(self,speed):
        print('starting execution!')
        params = parameters()
        agent = dqnagent(params,self.state_size) #initialize the agent!
        if(agent.load_weights): #load weights maybe
            agent.model.load_weights(agent.weights)
            print("loaded the weights")

        counter = 0 #how many games have been played/trials done
        record = 0 #highest score

        #---------------------------------------------------------------------------
        #------------------------- LOOP THRU EPISODES ------------------------------
        #---------------------------------------------------------------------------

        while counter<params['episodes']: #still have more trials to do
            counter += 1
            print(counter)

            print("\nEPISODE ", counter, "\n")

            if not params['train']: # if you're not training it, no need for exploration
                agent.epsilon = 0
            else:
                agent.epsilon = 1.0 - ((counter -1) * params['epsilon_decay_linear'])#exploration/randomness factor that decreases over time

            #print("EPSILON = ", agent.epsilon, "\n")

            if self.on_init() == False:
                self._running = False

            #print("PLAYER\tx : ", self.player.x,"\ty : ", self.player.y, "\n")
            duration = 0

            #---------------------------------------------------------------------------
            #--------------------------- INDIVIDUAL EPISODE ----------------------------
            #---------------------------------------------------------------------------
            # indexx = 0
            while(self._running):
                if(counter%self.frequency==0):
                    self.dataCollect.record(self.player.x, self.player.y, self.apple.x, self.apple.y)
                duration+=1
                #print("\nMOVE : ", duration, "\n")
                if(self.displayq):
                    pygame.event.pump()
                    keys = pygame.key.get_pressed() 
                    if (keys[K_ESCAPE]):
                        exit(0)
                oldstate = agent.get_state(self, self.player, self.apple)
                #print("\noldstate = ", oldstate)


                #--------------------------- GET AGENT ACTION ----------------------------

                if random() < agent.epsilon: #every so often random exploration
                    action = randint(0,3) #random action
                    #print("random action : ",action)
                else: #action predicted by agent
                    state = oldstate.reshape(1,self.state_size**2+8)
                    predictedq= agent.model.predict(state) # predicts the q values for the action in that state
                    action = np.argmax(predictedq[0]) #maximum (highest q) action
                    #print("predicted action : ", action, "\tq-values : ", predictedq)


                #---------------------------- EXECUTE ACTION -----------------------------

                print(action)
                self.player.do_move(action) #do the action
                self.on_loop()
                newstate = agent.get_state(self, self.player, self.apple) #new state from the action we've taken
                reward = agent.set_reward(self.player)
                #print("newstate = ", newstate)
                #print("reward = ", reward)
                #print("crashed = ", self.player.crashed, "\n")


                #---------------------------- SHORT TRAINING -----------------------------

                if(params['train']):
                    agent.train_short_memory(oldstate,action, reward, newstate, self.player.crashed)
                    agent.remember(oldstate,action, reward, newstate, self.player.crashed)


                #------------------------------ RENDER GAME ------------------------------

                self._running = not(self.player.crashed)
                if(self.displayq):
                    self.on_render(newstate)
                # if(counter%self.frequency==0):
                #     self.on_render(newstate)
                #     pygame.image.save(self._display_surf,savepath+str(indexx))
                time.sleep (speed/1000.0)
                # indexx +=1


            #---------------------------------------------------------------------------
            #----------------------- TRAINING & DATA COLLECTION ------------------------
            #--------------------------------------------------------------------------- 

            if(params['train']):
                agent.replay_new(agent.memory, params['batch_size'])
            
            
            # self.dataCollect.add(self.player.length,duration,agent.epsilon,agent.history.losses)
            self.dataCollect.add(self.player.length,duration,agent.epsilon, 0.0)
            self.dataCollect.save()
            #print(agent.history.losses.length())
            #agent.history.losses = []
            self.player.reset(3,self.windowDimX, self.windowDimY )
            self.on_cleanup()

        #---------------------------------------------------------------------------
        #------------------------------ DATA OUTPUT --------------------------------
        #--------------------------------------------------------------------------- 
        if(params['train']):
             os.mkdir(params['weights_path_save'])
             agent.model.save_weights(params['weights_path_save']+'/weights.hdf5')
        self.dataCollect.save()
Example #13
class PlayerGui:
    """The main GUI of the Digitala sagor player"""
    def __init__(self, root, datamodel, config):
        """Initiate the program
        
        Arguments
        root -- the Tk instance of the application
        datamodel -- the data model instance to show
        config -- ConfigParser containing application settings
        
        """
        self._datamodel = datamodel

        #Create string variables        
        self._lblTitle = StringVar()
        self._lblAuthor = StringVar()
        self._activeInPreview = []
        self._activeInPlay = []
        self._datacollector = DataCollector()
        self._userstop = False
        
        #Initiate GUI and data
        self._setupDisplay(root, config)
        self._initData(config)
        
        #Create transparent buttons
        self._setupCategories(config)
        self._setupControls(root, config)

        #Create player
        self._playerFrame = PlayerFrame(root, self._settings.playersize, self._playbackStopped)
        self._playerFrame.setClbClicked(self._clbClicked)
        self._playerwnd = self._canvas.create_window(self._settings.playerpos[0], 
                                                     self._settings.playerpos[1], 
                                                     window = self._playerFrame, 
                                                     anchor = NW)

        #Create preview
        self._previewFrame = PreviewFrame(root, self._datacollector, self._canvas, self._settings)
        self._previewFrame.setClbActivate(self._clbClicked)
        self._previewFrame.previewsubset(self._currentMovies)

        self._updateBrowseButtons()
        
        self._setPreviewMode()

        self._dml = DataModelLoader(root, self._datamodel)
        #after_idle does not work here; 100 ms is an arbitrarily chosen delay time
        root.after(100, self._dml.load)

    def _setupDisplay(self, root, config):
        """Set screen size and add background image
        
        root -- the Tk instance of the application
        config -- ConfigParser containing application settings

        """
        fullscreen = config.getboolean(ini.general, ini.fullscreen)
        bgimage = ini.getPath(config.get(ini.general, ini.bgimage))
        
        image = Image.open(bgimage)
        backgroundIm = ImageTk.PhotoImage(image)        
        
        if(fullscreen):
            screenwidth = root.winfo_screenwidth()
            screenheight = root.winfo_screenheight()
            (w, h) = image.size
            self._scalefactor = (screenwidth / float(w), screenheight / float(h))
            image = image.resize((screenwidth, screenheight))
        else:
            (screenwidth, screenheight) = image.size
            self._scalefactor = (1, 1)
            
        geom = "{}x{}+{}+{}".format(screenwidth, screenheight, 0, 0)
        root.geometry(geom)
        root.overrideredirect(1)
        
        background = Canvas(root, width = screenwidth, height = screenheight)
        self._canvas = background
        background.pack()
        backgroundIm = ImageTk.PhotoImage(image)
        self._backgroundIm = backgroundIm
        background.create_image(0,0, image = backgroundIm, anchor = NW)
        
    def _setupCategories(self, config):
        """Add buttons to browse between categories
        
        config -- ConfigParser containing application settings
        
        """
        #Get all button data
        lines = []
        ctr = 1
        option = ini.image + str(ctr)
        
        while(config.has_option(ini.year, option)):
            line = config.get(ini.year, option)
            lines.append(line)
            ctr += 1
            option = ini.image + str(ctr)
            
        #Create as many buttons as needed
        buttondata = zip(lines, self._years)
        
        if(len(buttondata) < len(self._years)):
            print('Warning! There are more categories than category buttons - some categories will not be shown')
            
        ctr = 0
        
        for (line, year) in buttondata:
            tb = TransparentButton(self._canvas, self._settings.generalfont, line, self._scalefactor)
            tb.setText(year)
            tb.setCommand(self._ehYear)
            tb.index = ctr
            ctr += 1
            self._activeInPreview.append(tb)

    def _setupControls(self, root, config):
        """Initiate control buttons        
        
        root -- the Tk instance of the application
        config -- ConfigParser containing application settings

        """
        iniline = config.get(ini.controls, ini.prev)
        tb = TransparentButton(self._canvas, self._settings.generalfont, iniline, self._scalefactor)
        tb.setCommand(self._ehPrev)
        self._btnPrev = tb
        self._activeInPreview.append(tb)
        
        iniline = config.get(ini.controls, ini.next)
        tb = TransparentButton(self._canvas, self._settings.generalfont, iniline, self._scalefactor)
        tb.setCommand(self._ehNext)
        self._btnNext = tb
        self._activeInPreview.append(tb)

        iniline = config.get(ini.controls, ini.start)
        tb = TransparentButton(self._canvas, self._settings.generalfont, iniline, self._scalefactor)
        self._btnPlay = tb

    def _initData(self, config):
        """Initiate internal variables        
        
        config -- ConfigParser containing application settings

        """
        #Check that there are movies
        if(self._datamodel.isEmpty()):
            showerror(lang[lng.txtNoMoviesTitle], lang[lng.txtNoMovies])
            raise Exception(lang[lng.txtNoMoviesTitle])

        #Initiate variables
        self._settings = HylteSettings(config, self._scalefactor)
        self._subsetSize = self._settings.previewcolumns * 2

        self._currentYearIx = 0
        self._years = sorted(self._datamodel.allMovies.iterkeys())
        self._updateYearState()

    def _updateBrowseButtons(self):
        """Set the text of the browse buttons to indicate current subsets"""
        prev = self._currentSubsetIndex
        next = self._currentSubsetIndex + 2
        
        if(prev == 0):
            prev = self._currentSubsetCount
            
        if(next > self._currentSubsetCount):
            next = 1
               
        text = lang[lng.txtPage] + " {}/{}".format(prev, self._currentSubsetCount)
        self._btnPrev.setText(text)
        
        text = lang[lng.txtPage] + " {}/{}".format(next, self._currentSubsetCount)
        self._btnNext.setText(text)

    def _updateYearState(self):
        """Update internal variables that depend on the selected category"""
        self._currentMovies = self._datamodel.allMovies.get(self._years[self._currentYearIx])
        self._currentMovieCount = len(self._currentMovies)
        self._currentSubsetIndex = 0
        self._currentSubsetCount = (len(self._currentMovies) + self._subsetSize - 1) / self._subsetSize

    def _setPlayMode(self):
        """Enable player, disable preview"""
        for button in self._activeInPreview:
            button.setEnabled(False)

        self._btnPlay.setText(lang[lng.txtStop])
        self._btnPlay.setCommand(self._ehStop)

        self._canvas.itemconfigure(self._playerwnd, state = NORMAL)
        self._previewFrame.setVisible(False)
        self._mode = _play

    def _setPreviewMode(self):
        """Enable preview, disable player"""
        for button in self._activeInPreview:
            button.setEnabled(True)

        self._btnPlay.setText(lang[lng.txtPlay])
        self._btnPlay.setCommand(self._ehPlay)
        
        self._canvas.itemconfigure(self._playerwnd, state = HIDDEN)
        self._previewFrame.setVisible(True)
        self._mode = _preview

    def _setSubset(self):
        """Display a subset of the available media items"""
        self._updateBrowseButtons()
        start = self._subsetSize * self._currentSubsetIndex
        stop = min(start + self._subsetSize + 1, self._currentMovieCount)
        self._previewFrame.previewsubset(self._currentMovies[start:stop])
        
    def _play(self):
        """Start playback"""
        self._setPlayMode()
        self._datacollector.addStatisticLine(lng.txtPlaybackStarted)
        
        try:
            self._playerFrame.play(self._previewFrame.selecteditem)
        except:
            self._playbackStopped()

    def _stop(self):
        """Stop playback"""
        #Update time to ensure that the session doesn't end because of the movie length
        self._datacollector.reset()
        self._datacollector.addStatisticLine(lng.txtUserStoppedPlayback)
        self._userstop = True
        self._playerFrame.stop()

    #Callbacks
    def _playbackStopped(self):
        """Handle user data collection and set preview mode"""
        if(not self._userstop):
            self._datacollector.reset()
            self._datacollector.addStatisticLine(lng.txtPlaybackFinished)

        #Clear the flag so a user-initiated stop is not carried over to the next playback
        self._userstop = False
        self._setPreviewMode()

    def _clbClicked(self):
        """Switch between playback and preview depending on mode"""
        if(self._mode == _preview):
            self._play()
        elif(self._mode == _play):
            self._stop()

    #Event handlers
    def _ehPlay(self, event, tb):
        """Handle play event
        
        Arguments
        event -- event object
        tb -- transparent button instance
        
        """
        self._play()
        
    def _ehStop(self, event, tb):
        """Handle stop event
        
        Arguments
        event -- event object
        tb -- transparent button instance
        
        """
        self._stop()

    def _ehYear(self, event, tb):
        """Handle an event from one of the category buttons
        
        Arguments
        event -- event object
        tb -- transparent button instance
        
        """
        self._datacollector.detect()
        
        self._currentYearIx = tb.index
        self._updateYearState()
        self._updateBrowseButtons()
        self._previewFrame.previewsubset(self._currentMovies[:self._subsetSize])

    def _ehPrev(self, event, tb):
        """Handle an event from the browse backward button
        
        Arguments
        event -- event object
        tb -- transparent button instance
        
        """
        self._datacollector.detect()
        if(self._currentSubsetIndex > 0):
            self._currentSubsetIndex -= 1
        else:
            self._currentSubsetIndex = self._currentSubsetCount - 1
            
        self._setSubset()
                    
    def _ehNext(self, event, tb):
        """Handle an event from the browse forward button
        
        Arguments
        event -- event object
        tb -- transparent button instance
        
        """
        
        self._datacollector.detect()
        if(self._currentSubsetIndex < (self._currentSubsetCount - 1)):
            self._currentSubsetIndex += 1
        else:
            self._currentSubsetIndex = 0
            
        self._setSubset()
Example #14
0
# #########################################
# Main Simulator Module        #
# #########################################

from node import Node
from idGenerator import IdGenerator
from operation import Operation
from stats import Stats
from datacollector import DataCollector
from graphplotter import GraphPlotter
import property  # assumed project settings module providing INITID and RANGEID

nodeIdGenerator = IdGenerator()
nodeIdGenerator.lastAllottedId = property.INITID
nodeIdGenerator.randomRange = property.RANGEID

data_collector = DataCollector()

maxId = 0

########## NODES IN RING OVERLAY #######################
nodes = []

########################################################


############## OPERATIONS AND EXPERIMENTATION ##########
def add_node(newNode, existingnode):
    newNode.join(existingnode)
    newNode.update_others(Operation.INSERT)
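
# Illustrative only -- the names below are assumed, not from the original module.
# Once the ring has a bootstrap node, further arrivals would go through add_node():
#
#   bootstrap = nodes[0]            # assumes the ring was seeded elsewhere
#   newNode = Node(...)             # constructor arguments not shown in this excerpt
#   add_node(newNode, bootstrap)    # join via any existing node
#   nodes.append(newNode)
#   # data_collector then aggregates the per-arrival message counts for plotting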

Example #15
0
class Radio(object):
    def __init__(self, show=-1):
        self.started = False

        self.condition = ''
        self.fullCondition = '?'

        self.preselectedShow = show

        self.clip = 0
        self.lastClip = 0
        self.conversation = 0

        self.player = pyglet.media.ManagedSoundPlayer()
        self.llPlayer = None
        self.clipDuration = 0
        self.clipStartTime = 0
        self.currentClip = None
        self.curShow = None

        #sound = pyglet.resource.media(BALL_SOUND, streaming=False)

        #self.hardConvDB = ['show1', 'show2']
        #self.easyConvDB = ['show3', 'show4']
        #self.convAnswers = []
        self.shows = [['show3', 'show4'], ['show1', 'show2']]  # easy / hard
        self.numFragments = [[1, 1], [32, 34]]
        self.answers = []
        #self.convPool = range(len(self.convDB))

        self.questionOrder = []
        self.questionIdx = 0
        self.correctAnswer = 0

        self.responded = False
        self.playedLetter = False
        self.playedRepeat = False
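        # phase state machine (hard condition): 0 = fragment playing,
        # 1 = question and answer options playing, 2 = waiting for the
        # participant's response, 3 = feedback / pause before the next clip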
        self.phase = 0

        self.responseCountdown = 0.0
        self.responseLimit = 5.0
        self.clipInterval = 1.5
        self.isi = 0.0

        self.run = -1
        self.doPractice = False
        self.roadCond = 'simple'

        #self.incorrectSound = pyglet.resource.media('incorrect.wav')
        #self.correctSound = pyglet.resource.media('correct.wav')

        self.db = None

    def setConversation(self, show):
        self.preselectedShow = show

    def clearConversation(self):
        self.preselectedShow = -1

    def startTask(self, cond, driving='?', blockNum=0):
        self.condition = cond
        self.fullCondition = driving + '_' + cond
        self.run += 1

        datacond = '_' + self.roadCond + '_hard'

        if self.doPractice and self.run == 0:
            datacond = '_prac' + datacond

        if cond == 'hard':
            self.db = DataCollector(
                'Radio DB',
                'data/' + globals.participant + datacond + '_radio.dat', [
                    'pp', 'cond', 'correct', 'answer', 'conversation',
                    'question', 'condtime', 'time'
                ])
            self.db.open()

        self.started = True
        self.pickConversation()

        self.startNextClip()

    def stopTask(self):
        self.started = False

        if self.llPlayer is not None:
            self.llPlayer.terminate()
            self.llPlayer = None

        #if self.player.playing:
        #self.player.next()

        if self.condition == 'hard':
            self.db.close()

    def pickConversation(self):
        cond = 0
        if self.run == 0 and self.doPractice:
            self.curShow = 'practice'
            self.lastClip = 8
            self.clip = -1
        else:
            if self.condition == 'hard':
                cond = 1

            if len(self.shows[cond]) > 0:
                idx = random.sample(range(0, len(self.shows[cond])), 1)[0]
                self.curShow = self.shows[cond][idx]
                self.lastClip = self.numFragments[cond][idx]

                del self.shows[cond][idx]
                del self.numFragments[cond][idx]

                self.clip = -1
            else:
                self.started = False

    def startNextClip(self):
        if self.started:
            self.responseCountdown = 0.0
            self.isi = 0.0
            self.responded = False
            self.playedLetter = False
            self.phase = 0

            if self.clip + 1 >= self.lastClip:
                #self.pickConversation()
                self.clip = -1
                self.stopTask()
            else:

                self.clip += 1

                #self.currentClip = pyglet.media.load('radio/'+self.convglobals.db[self.conversation]+'_'+str(self.clip+1)+'.mp3', streaming=False)
                if self.condition == 'hard':
                    currentClip = 'radio/' + self.curShow + '/' + self.curShow + 'frag' + str(
                        self.clip + 1) + '.mp3'
                    self.playAudio(currentClip)
                else:
                    currentClip = 'radio/' + self.curShow + '/' + self.curShow + '.mp3'
                    self.playAudio(currentClip)
                    #audio_file = os.getcwd()+'/radio/show1/show1frag1q.mp3'
                    #print audio_file

                    #self.playAudio(audio_file)
                    #self.llPlayer = subprocess.Popen(["afplay", audio_file], shell=False)
                    #pid = p.pid()
                    #p.terminate()

    def draw(self):
        return 0

    def update(self, dt):
        if self.started:
            if self.condition == 'hard':
                #print str(self.player.time)+' - '+str(self.currentClip.duration)
                if not self.audioIsPlaying() and not self.responded:
                    if self.phase == 0:  # clip finished, play question
                        currentClip = 'radio/' + self.curShow + '/' + self.curShow + 'frag' + str(
                            self.clip + 1) + 'q.mp3'
                        self.playAudio(currentClip)

                        self.questionOrder = [0, 1, 2]
                        random.shuffle(self.questionOrder)
                        #print self.questionOrder
                        self.questionIdx = 0
                        self.correctAnswer = self.questionOrder.index(0)
                        self.phase = 1

                    elif self.phase == 1:  # questions finished, play options
                        if self.questionIdx > 2:
                            # repeat question after answers
                            if self.playedRepeat:
                                currentClip = 'radio/' + self.curShow + '/' + self.curShow + 'frag' + str(
                                    self.clip + 1) + 'q.mp3'
                                self.playAudio(currentClip)
                                self.playedRepeat = False

                                self.phase = 2  # options finished, go to response phase

                            else:  # say 'repeat'
                                currentClip = 'radio/herhaal.mp3'
                                self.playAudio(currentClip)
                                self.playedRepeat = True
                        else:
                            letters = ['a', 'b', 'c']
                            number = self.questionOrder[self.questionIdx]
                            letter = letters[number]

                            if self.playedLetter:
                                currentClip = 'radio/' + self.curShow + '/' + self.curShow + 'frag' + str(
                                    self.clip + 1) + letter + '.mp3'
                                self.playAudio(currentClip)

                                self.playedLetter = False
                                self.questionIdx += 1
                            else:  # play the answer letter clip first
                                currentClip = 'radio/' + letters[
                                    self.questionIdx] + '.mp3'
                                self.playAudio(currentClip)

                                self.playedLetter = True
                elif self.phase == 2:
                    self.responseCountdown += dt
                    #print self.responseCountdown

                    if self.responseCountdown >= self.responseLimit:
                        self.processResponse(-1)

                elif self.phase == 3:  # feedback
                    self.isi += dt
                    if self.isi >= self.clipInterval:
                        self.startNextClip()
            #else: # easy
            #a = 1
            #if not self.player.playing: # get next clip
            #print 'finished'
            #self.startNextClip()

    def processResponse(self, button):

        if self.started and not self.responded and self.phase == 2:
            #self.player.pause()
            #self.player.next()
            correct = 0

            let = ['A', 'B', 'C']

            if button >= 0 and button < 4:
                if self.audioIsPlaying():
                    if self.llPlayer is not None:
                        self.llPlayer.terminate()
                        self.llPlayer = None

                print 'Answer ' + let[
                    button] + ' was selected. Correct answer was ' + let[
                        self.correctAnswer]
                if button == self.correctAnswer:
                    correct = 1
            else:
                print 'No answer was selected. Correct answer was ' + let[
                    self.correctAnswer]

            if correct:
                correctSound = pyglet.resource.media('correct.wav')
                correctSound.play()
                globals.bonusCounter += globals.RADIO_QUIZ_BONUS
                #self.correctSound.play()
            else:
                incorrectSound = pyglet.resource.media('incorrect.wav')
                incorrectSound.play()
                #self.incorrectSound.play()

            self.phase = 3
            self.responded = True

            self.db.addData([
                'pp', globals.participant, 'cond', self.fullCondition,
                'correct', correct, 'answer', button, 'conversation',
                self.conversation, 'question', self.clip, 'condtime',
                r3(globals.conditionTime()), 'time',
                r3(globals.currentTime())
            ], True)

    def checkInput(self):

        if self.started:
            if self.condition == 'hard':
                if not self.responded and self.phase > 0:

                    if globals.hasWheel:
                        if globals.joystick.buttons[
                                3]:  #or globals.joystick.buttons[3]:
                            self.processResponse(0)
                        elif globals.joystick.buttons[
                                2]:  #or globals.joystick.buttons[5]:
                            self.processResponse(1)
                        elif globals.joystick.buttons[
                                0]:  #or globals.joystick.buttons[7]:
                            self.processResponse(2)

                    if globals.findKey('1') >= 0:
                        self.processResponse(0)
                    elif globals.findKey('2') >= 0:
                        self.processResponse(1)
                    elif globals.findKey('3') >= 0:
                        self.processResponse(2)

    def setCondition(self, cond):
        self.condition = cond

    def playAudio(self, filename):
        #filename2 = 'radio/'+self.curShow+'/'+self.curShow+'frag'+str(self.clip+1)+'.ogg'
        clip = pyglet.media.load(filename, streaming=False)
        self.clipDuration = clip.duration
        #print 'clip duration'
        #print clip.duration
        #audio = MP3(filename2)
        #print 'mutagen'
        #print audio.info.length
        #txt = ''
        #try:
        #subprocess.check_output(["ffmpeg", "-i", filename])
        #except subprocess.CalledProcessError, e:
        ##print "Ping stdout output:\n", e.output
        #txt = e.output

        #ffmpeg -i show1frag1.mp3 2>&1|sed -n "s/.*Duration: \([^,]*\).*/\1/p"
        #print txt
        self.clipStartTime = time.time()
        #print 'clip start time'
        #print self.clipStartTime
        clip = None

        if globals.onOSX:
            # using command prompt because pyglet media player is really buggy...
            self.llPlayer = subprocess.Popen(["afplay", filename], shell=False)
            if self.clipDuration > 5.0:
                print 'Audio PID: ' + str(
                    self.llPlayer.pid) + ' (kill with "kill <PID>" in bash)'
        else:
            # windows requires mplayer to be installed
            self.llPlayer = subprocess.Popen(
                ["mplayer", filename, "-ss", "30"],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)

    def audioIsPlaying(self):
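        # Playback end is inferred from wall-clock time against the clip
        # duration read at load time, since the external afplay/mplayer
        # process gives no completion callback; the process is terminated
        # once the duration has elapsed.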
        if self.llPlayer is not None:
            tm = time.time()
            #print str(tm-self.clipStartTime)+' - '+str(tm)+' - '+str(self.clipStartTime+self.clipDuration)
            if tm - self.clipStartTime >= self.clipDuration:
                #print 'end time'
                #print tm
                if self.llPlayer is not None:
                    if not globals.onOSX:
                        self.llPlayer.stdin.write("q")
                    self.llPlayer.terminate()
                    self.llPlayer = None

                return False

        return True
Example #16
0
        display.write_message(message)

        datafile = "%s/%s.%s" % (dataPath, _getCurrentTime(), logEnding)

        loggedBytes += datacollector.write_data_log(datafile,
                                                    nbrOfOBDFrames=1000,
                                                    messagesPerTimestamp=20)

        print "Collected data log..."
        gzip.add_file_for_compression(datafile)


if __name__ == "__main__":
    ## make sure the script is called correctly
    if 2 != len(sys.argv):
        raise OSError(
            "[ERROR] Correct usage:\n  python obd2collector <data directory>")

    dataPath = sys.argv[1]

    print "Starting OBD2 DataCollector"

    display = Display(autostart=True)
    datacollector = DataCollector(display)

    try:
        main(dataPath, datacollector, display)
    except KeyboardInterrupt:
        ## close all threads (hopefully)
        Thread.shutdown()
Example #17
0
os.mkdir('./data/transactions/ponzi/')
os.mkdir('./data/transactions/nonponzi/')
os.mkdir('./data/internal_transactions/ponzi/')
os.mkdir('./data/internal_transactions/nonponzi/')
os.mkdir('./data/contracts/ponzi/')
os.mkdir('./data/contracts/nonponzi/')

# important files
ponzi_contract_csv = './data/ponziContracts.csv'
nonponzi_contract_csv = './data/non_ponziContracts.csv'
opcode_csv = './data/Opcodes.csv'

# Download transaction data
print("Downloading Opcode.csv file...(over 4 GB)")
subprocess.call(['./download_opcodes.sh'])
datacollector = DataCollector(ponzi_contract_csv, nonponzi_contract_csv)

# download opcode data
opcode_collector = OpcodeColector(ponzi_contract_csv, nonponzi_contract_csv,
                                  opcode_csv)

# (RECOMMENDED!) OPTION 2: download the dataset from this link; unzip and place the data folder in the root folder
# comment out the code from line 13 to line 45
# Link: https://drive.google.com/open?id=1izaOs4Mlp6dxdRMtRYQeUfkDhlqLf4Z6

# Extract features
extract_feature_of_all_contract = ExtractFeatureOfAllContract()

# Train models
model_performance = Model()
Example #18
0
    def startNextState(self, car, traffic, renderer):

        state = self.flow[self.flowIdx]

        if state.type == 'screen':
            renderer.textScreens[self.flow[self.flowIdx].id].start()

        elif state.type == 'drive':
            self.block += 1
            self.blockDuration = state.duration

            # reset damage counter after practice
            if self.doPractice and self.block == 2:
                globals.bonusCounter = globals.STARTING_BONUS
                self.radioTask.doPractice = False
                self.tabletTask.doPractice = False

            print '\n########\nSTARTING BLOCK '+str(self.block-1)+'\n########\n'+state.id+' & '+state.secondary+', length: '+str(self.blockDuration)
            print 'Current bonus: '+str(globals.bonusCounter/10.0)

            # set up databases for driving
            globals.db['blinker'] = DataCollector('Blinker DB', 'data/'+globals.participant+'_'+self.getCondition()+'_blinker.dat',
                                            ['pp', 'condition', 'direction', 'block', 'condtime', 'time'])
            globals.db['lanetransition'] = DataCollector('Lane Transitions DB', 'data/'+globals.participant+'_'+self.getCondition()+'_transition.dat',
                                            ['pp', 'condition', 'blinkerused', 'blinkdir', 'cardir', 'congruent', 'block', 'condtime', 'time'])
            globals.db['overtake'] = DataCollector('Overtaken Slow Traffic DB', 'data/'+globals.participant+'_'+self.getCondition()+'_overtake.dat',
                                            ['pp', 'condition', 'state', 'carNum', 'carDist', 'block', 'condtime', 'time'])
            globals.db['collision'] = DataCollector('Collision DB', 'data/'+globals.participant+'_'+self.getCondition()+'_collision.dat',
                                            ['pp', 'condition', 'zone', 'carType', 'carNum', 'block', 'condtime', 'time'])
            globals.db['car'] = DataCollector('Car DB', 'data/'+globals.participant+'_'+self.getCondition()+'_car.dat', ['pp', 'condition',
                                        'speed', 'wheelangle', 'yposition', 'deviation', 'accel', 'break', 'xposition', 'block', 'condtime', 'time'])
            globals.db['slowcars'] = DataCollector('Slow Car DB', 'data/'+globals.participant+'_'+self.getCondition()+'_slowcars.dat', ['pp', 'condition',
                                        'id', 'speed', 'xposition', 'distance', 'block', 'condtime', 'time'])
            globals.db['fastcars'] = DataCollector('Fast Car DB', 'data/'+globals.participant+'_'+self.getCondition()+'_fastcars.dat', ['pp', 'condition',
                                        'id', 'speed', 'xposition', 'distance', 'block', 'condtime', 'time'])
            globals.db['blinker'].open()
            globals.db['lanetransition'].open()
            globals.db['car'].open()
            globals.db['overtake'].open()
            globals.db['collision'].open()
            globals.db['slowcars'].open()
            globals.db['fastcars'].open()

            # traffic.road = self.roads[self.block]
            # car.bindRoad(self.roads[self.block])

            traffic.reset(self.roads[self.block])
            car.reset(self.roads[self.block])

            # populate road with traffic
            if state.id == 'complex':
                sDiff = abs(globals.EXPECTED_SPEED - globals.SLOW_SPEED)
                dist = sDiff * globals.DRIVE_DURATION
                overtakes = globals.COMPLEX_OVERTAKES
                duration = globals.DRIVE_DURATION

                if self.doPractice and self.block < 2: # practice blocks
                    duration = globals.PRACTICE_DURATION
                    overtakes = globals.PRACTICE_OVERTAKES

                interval = dist / overtakes
                #print interval
                #interval = 20.0
                print 'Generating complex traffic'
                traffic.genStaticCars(overtakes, globals.EXPECTED_SPEED, globals.SLOW_SPEED, duration, globals.RIGHT_LANE)
            else:
                sDiff = abs(globals.EXPECTED_SPEED - globals.FAST_SPEED)
                dist = sDiff * globals.DRIVE_DURATION
                interval = dist / globals.SIMPLE_OVERTAKERS
                #print interval
                print 'Generating simple traffic'
                traffic.genStaticCars(globals.SIMPLE_OVERTAKERS, globals.EXPECTED_SPEED, globals.FAST_SPEED, globals.DRIVE_DURATION, globals.LEFT_LANE)

            if state.secondary != 'none':
                print 'Starting secondary task'
                if state.secondary == 'tablet':
                    print 'Tablet task selected'
                    self.secondary = self.tabletTask
                else:
                    print 'Radio task selected'
                    self.secondary = self.radioTask

                if (not self.doPractice or (self.block > 1)) and state.secondary != 'easy':
                    print 'Loading hard task conditions (quiz or tablet)'

                    curShow = 0

                    if self.fixedOrder:
                        print 'Quiz show order fixed on participant ID'
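                        # counterbalancing: even participant numbers get show index 0
                        # for the tablet task and 1 for the radio quiz; odd numbers the reverse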
                        ppid = globals.participant.split('D') # names have the form SDXX or CDXX with XX between 01 and 24

                        even = False
                        if len(ppid) > 1:
                            ppidx = int(ppid[1])
                            if (ppidx % 2) == 0:
                                even = True

                        if even:
                            if state.secondary == 'tablet':
                                curShow = 0
                            else:
                                curShow = 1
                        else: # odd
                            if state.secondary == 'tablet':
                                curShow = 1
                            else:
                                curShow = 0

                    else:
                        print 'Picking quiz show randomly'
                        # pick a show
                        idx = random.sample(range(0,len(self.shows)), 1)[0]
                        curShow = self.shows[idx]

                        del self.shows[idx]

                    print 'Playing show '+str(curShow)
                    self.secondary.setConversation(curShow)

                self.secondary.startTask(state.secondary, state.id)

            print 'Beginning driving section'

            globals.conditionStartTime = helpers.currentTime()
Example #19
0
class Tablet(object):

    def __init__(self, show=-1):
        self.started = False

        self.condition = ''
        self.fullCondition = '?'

        self.preselectedShow = show

        self.clip = 0
        self.lastClip = 0
        self.conversation = 0

        self.clipLines = []
        self.minLineIdx = 0
        self.maxLineIdx = 0
        self.clipDuration = 0
        self.lineInterval = 0
        self.clipStartTime = 0
        self.currentClip = None
        self.curShow = None
        self.clipQuestion = None

        self.tabletTexture = image.load('img/dashboard-tablet.png').texture
        self.hh = 1.0
        self.ww = 1.0
        self.tabletDisplayText = ''
        self.maxDisplayLength = 10 # max num of lines displayed
        self.tabletLineWidth = 40

        self.allShows = ['show1', 'show2'] # easy / hard
        self.allNumFragments = [32,34]
        self.shows = self.allShows
        self.numFragments = self.allNumFragments

        self.answers = []

        self.questionOrder = []
        self.questionIndx = 0
        self.correctAnswer = 0

        self.responded = False
        self.playedLetter = False
        self.playedRepeat = False
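        # phase state machine: 1 = transcript lines being shown on the tablet,
        # 2 = question displayed / waiting for a response, 3 = feedback pause
        # before the next clip (0 is only the pre-start value)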
        self.phase = 0

        self.responseCountdown = 0.0
        self.responseLimit = 15.0
        self.clipInterval = 1.5
        self.lastLineTime = 0.0
        self.isi = 0.0

        self.run = -1
        self.doPractice = False
        self.roadCond = 'simple'

        #self.incorrectSound = pyglet.resource.media('incorrect.wav')
        #self.correctSound = pyglet.resource.media('correct.wav')

        self.db = None

    def setConversation(self, show):
        self.preselectedShow = show

    def clearConversation(self):
        self.preselectedShow = -1

    def startTask(self, cond, driving='?', blockNum=0):
        self.condition = cond
        self.fullCondition = driving+'_'+cond
        self.run += 1

        datacond = '_' + self.roadCond + '_tablet'

        if self.doPractice:
            datacond = '_prac' + datacond

        self.db = DataCollector('Tablet DB', 'data/'+globals.participant+datacond+'_tablet.dat', ['pp', 'cond', 'correct',
                                                                           'answer', 'conversation', 'question',
                                                                           'condtime', 'time'])
        self.db.open()

        self.started = True
        self.pickConversation()

        self.startNextClip()

    def stopTask(self):
        self.started = False

        #if self.player.playing:
            #self.player.next()

        self.db.close()

    def pickConversation(self):
        if self.doPractice:
            self.curShow = 'practice'
            self.lastClip = 8
            self.clip = -1
            print 'Loaded practice show'
        else:
            if self.preselectedShow > -1:
                self.curShow = self.allShows[self.preselectedShow]
                self.lastClip = self.allNumFragments[self.preselectedShow]

                self.clip = -1
                print 'Loaded preselected:'+self.curShow
            else:
                if len(self.shows) > 0:
                    idx = random.sample(range(0,len(self.shows)), 1)[0]
                    self.curShow = self.shows[0] #self.shows[idx]
                    self.lastClip = self.numFragments[idx]

                    del self.shows[idx]
                    del self.numFragments[idx]

                    self.clip = -1

                    print 'Loaded random:'+self.curShow
                else:
                    print 'No shows left to load'
                    self.started = False

    def startNextClip(self):
        if self.started:
            self.responseCountdown = 0.0
            self.isi = 0.0
            self.responded = False
            self.playedLetter = False
            self.phase = 1
            self.minLineIdx = -1
            self.maxLineIdx = -1
            self.lastLineTime = 0
            self.clipLines = []

            if self.clip+1 >= self.lastClip:
                #self.pickConversation()
                self.clip = -1
                self.stopTask()
            else:

                self.clip += 1
                #self.clip = 0

                #print 'play clip: ' + str(self.clip+1) + ' of '+ str(self.lastClip)
                with open('tablet/'+str(self.curShow)+'/'+str(self.curShow)+'frag'+str(self.clip+1)+'.txt') as f:
                    lines = f.readlines()

                    duration = lines[0]
                    duration = duration.split(':')
                    self.clipDuration = (float(duration[0])*60 + float(duration[1]))
                    #print 'Duration of clip: '+str(self.clipDuration)

                    i = 1
                    while len(lines[i]) > 2:
                        l = lines[i].split(' - ')
                        #print str(len(lines[i]))
                        #print lines[i]
                        #print l[0]
                        #print l[1]
                        self.clipLines.append(ClipLine(l[0], l[1]))
                        i += 1

                    self.lineInterval = self.clipDuration / len(self.clipLines)
                    #print 'Line interval: '+str(self.clipDuration)

                    #print 'Question: '+lines[i+1]
                    self.clipQuestion = Question(lines[i+1], lines[i+2], lines[i+3], lines[i+4])

                self.clipStartTime = globals.currentTime()

                #currentClip = 'radio/'+self.curShow+'/'+self.curShow+'frag'+str(self.clip+1)+'.mp3'
                #self.playAudio(currentClip)

    def updateTabletText(self, dt):
        time = globals.currentTime()
        if time - self.lastLineTime >= self.lineInterval and self.phase < 2: # time to show the next line on screen
            #print 'Show new line. maxLineIdx from '+str(self.maxLineIdx)+' to '+str(self.maxLineIdx+1)
            #self.minLineIdx += 1
            self.maxLineIdx += 1
            self.lastLineTime = time

            if self.maxLineIdx < len(self.clipLines):
                #print 'Still new lines left to show'
                totalLen = 0 # determine total displayed text length in lines (approx)
                for i in range(self.minLineIdx+1, self.maxLineIdx+1):
                    totalLen += self.clipLines[i].lineLen(self.tabletLineWidth)

                #print 'Total length: '+str(totalLen)

                # displayed text will be too long, strip oldest lines until it fits
                if totalLen > self.maxDisplayLength:
                    #print 'Got to remove some lines from the display'
                    difference = totalLen - self.maxDisplayLength
                    while difference > 0:
                        ll = self.clipLines[self.minLineIdx+1].lineLen(self.tabletLineWidth)
                        difference -= ll
                        self.minLineIdx += 1

                # update displayed text
                self.tabletDisplayText = '<font face="Helvetica,Arial" size=+1>'
                for i in range(self.minLineIdx+1, self.maxLineIdx+1):
                    self.tabletDisplayText += self.clipLines[i].output()+'<br><br>'

                self.tabletDisplayText += '</font>'

                correctSound = pyglet.resource.media('message.wav')
                correctSound.play()
                #self.tabletText.append(self.clipLines[self.lineIdx].output())

    def clipFinished(self):

        time = globals.currentTime()
        if time - self.clipStartTime < self.clipDuration + self.lineInterval:
            return False

        return True

    def draw(self):
        if self.started:
            yOff = 0

            screenRatio = float(self.hh) / float(self.ww)
            glPushMatrix()
            glBindTexture(GL_TEXTURE_2D, self.tabletTexture.id)
            pyglet.graphics.draw(4, pyglet.gl.GL_QUADS,
                                    ('v3f', (-0.8*screenRatio, -1.0+yOff, 0.0,
                                             -0.8*screenRatio, -0.2+yOff, 0.0,
                                             -0.25*screenRatio, -0.2+yOff, 0.0,
                                             -0.25*screenRatio, -1.0+yOff, 0.0)),
                                    ('t2f', (0.0, 0.0,
                                            0.0, 1.0,
                                            1.0, 1.0,
                                            1.0, 0.0))
                                )
            glPopMatrix()
            self.set_text()

            displayText = None

            #if self.phase < 2:
            displayText = self.tabletDisplayText
            #else:
                #displayText = self.tabletDisplayQuestion

            #print displayText
            text = pyglet.text.HTMLLabel(displayText,
                          x=int(0.3*self.ww), y=int(0.17*self.hh),
                          multiline=True,
                          width=int(0.48*self.ww),
                          height=(0.3*self.hh),
                          #color=(0,0,0,255),
                          anchor_x='center', anchor_y='center')
            text.draw()

            self.set_2d()

    def set_2d(self):
        w = self.ww
        h = self.hh
        glDisable(GL_DEPTH_TEST)
        glViewport(0, 0, w, h)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(-1, 1, -1, 1, -1, 1)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

    def set_text(self):
        w = self.ww
        h = self.hh
        glDisable(GL_DEPTH_TEST)
        glViewport(0, 0, w, h)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluOrtho2D(0, w, 0, h)  # gluOrtho2D takes only left, right, bottom, top
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()


    def update(self, dt):
        if self.started:
            if self.phase == 1:
                if not self.clipFinished() and not self.responded: # clip is still running on the tablet
                    self.updateTabletText(dt)

                else: # clip is finished playing
                    self.phase = 2

                    self.buildQuestion()

                    questionSound = pyglet.resource.media('question.wav')
                    questionSound.play()
            elif self.phase == 2:

                self.responseCountdown += dt
                #print self.responseCountdown

                if self.responseCountdown >= self.responseLimit:
                    self.processResponse(-1)

            elif self.phase == 3: # feedback
                self.isi += dt
                if self.isi >= self.clipInterval:
                    self.startNextClip()

    def buildQuestion(self):
        self.tabletDisplayText = '<font face="Helvetica,Arial" size=+1>'

        self.tabletDisplayText += self.clipQuestion.question+'<br><br>'
        alph = ['A','B','C']
        self.questionOrder = [0, 1, 2]
        random.shuffle(self.questionOrder)
        self.correctAnswer = self.questionOrder.index(0)

        for i in range(0,3):
            self.tabletDisplayText += '<b>'+alph[i]+'.</b> '+self.clipQuestion.ans[self.questionOrder[i]]+'<br><br>'

        self.tabletDisplayText += '</font>'

    def processResponse(self, button):

        if self.started and not self.responded and self.phase == 2:
            #self.player.pause()
            #self.player.next()
            correct = 0

            let = ['A', 'B', 'C']

            if button >= 0 and button < 4:

                print 'Answer '+let[button]+' was selected. Correct answer was '+let[self.correctAnswer]
                if button ==  self.correctAnswer:
                    correct = 1
            else:
                print 'No answer was selected. Correct answer was '+let[self.correctAnswer]

            if correct:
                correctSound = pyglet.resource.media('correct.wav')
                correctSound.play()
                globals.bonusCounter += globals.TABLET_QUIZ_BONUS
                #self.correctSound.play()
            else:
                incorrectSound = pyglet.resource.media('incorrect.wav')
                incorrectSound.play()
                #self.incorrectSound.play()

            self.phase = 3
            self.responded = True
            self.tabletDisplayText = ''

            self.db.addData(['pp', globals.participant, 'cond', self.fullCondition, 'correct', correct,
                            'answer', button, 'conversation', self.conversation, 'question', self.clip,
                            'condtime', r3(globals.conditionTime()), 'time', r3(globals.currentTime())], True)

    def checkInput(self):

        if self.started:
            if not self.responded and self.phase > 1:

                if globals.hasWheel:
                    if globals.joystick.buttons[3]: #or globals.joystick.buttons[3]:
                        self.processResponse(0)
                    elif globals.joystick.buttons[2]: #or globals.joystick.buttons[5]:
                        self.processResponse(1)
                    elif globals.joystick.buttons[0]: #or globals.joystick.buttons[7]:
                        self.processResponse(2)

                if globals.findKey('1') >= 0:
                    self.processResponse(0)
                elif globals.findKey('2') >= 0:
                    self.processResponse(1)
                elif globals.findKey('3') >= 0:
                    self.processResponse(2)

    def setCondition(self, cond):
        self.condition = cond

    def playAudio(self, filename):
        return False

    def audioIsPlaying(self):
        return False
Example #20
0
class Trainer:
    def __init__(self, args):

        self._logger = logging.getLogger('Trainer')
        self._checkpoint = {}

        config_name = args.config

        # hardcode the config path just for convenience.
        cfg = Utils.config("./config/" + config_name)

        number_of_planes = cfg['GAME'].getint('number_of_planes')
        board_size = cfg['GAME'].getint('board_size')
        encoder_name = cfg['GAME'].get('encoder_name')

        az_mcts_rounds_per_move = cfg['AZ_MCTS'].getint('rounds_per_move')
        az_mcts_temperature = cfg['AZ_MCTS'].getfloat('temperature')
        c_puct = cfg['AZ_MCTS'].getfloat('C_puct')

        basic_mcts_c_puct = cfg['BASIC_MCTS'].getfloat('C_puct')

        buffer_size = cfg['TRAIN'].getint('buffer_size')

        batch_size = cfg['TRAIN'].getint('batch_size')

        epochs = cfg['TRAIN'].getint('epochs')

        self._basic_mcts_rounds_per_move = cfg['BASIC_MCTS'].getint(
            'rounds_per_move')
        self._latest_checkpoint_file = './checkpoints/' + config_name.split(
            '.')[0] + '/latest.pth.tar'
        self._best_checkpoint_file = './checkpoints/' + config_name.split(
            '.')[0] + '/best.pth.tar'

        check_number_of_games = cfg['EVALUATE'].getint('number_of_games')

        os.makedirs(os.path.dirname(self._latest_checkpoint_file),
                    exist_ok=True)
        os.makedirs(os.path.dirname(self._best_checkpoint_file), exist_ok=True)

        use_cuda = torch.cuda.is_available()
        devices_ids = []
        if use_cuda:
            devices_ids = list(map(int, args.gpu_ids.split(',')))
            num_devices = torch.cuda.device_count()
            if len(devices_ids) > num_devices:
                raise Exception(
                    '#available gpu : {} < --device_ids : {}'.format(
                        num_devices, len(devices_ids)))

        if encoder_name == 'SnapshotEncoder':
            encoder = SnapshotEncoder(number_of_planes, board_size)
            input_shape = (number_of_planes, board_size, board_size)

        if encoder_name == 'DeepMindEncoder':
            encoder = DeepMindEncoder(number_of_planes, board_size)
            input_shape = (number_of_planes * 2 + 1, board_size, board_size)

        if encoder_name == 'BlackWhiteEncoder':
            encoder = BlackWhiteEncoder(number_of_planes, board_size)
            input_shape = (number_of_planes * 2 + 2, board_size, board_size)

        self._model_name = cfg['MODELS'].get('net')
        self._model = ResNet8Network(
            input_shape, board_size * board_size
        ) if self._model_name == 'ResNet8Network' else Simple5Network(
            input_shape, board_size * board_size)

        self._optimizer = Utils.get_optimizer(self._model.parameters(), cfg)

        self._experience_buffer = ExpericenceBuffer(buffer_size)
        self._check_frequence = cfg['TRAIN'].getint('check_frequence')

        self._start_game_index = 1
        self._train_number_of_games = cfg['TRAIN'].getint('number_of_games')

        # resume: reload model, optimizer and experience buffer from the latest checkpoint (i.e. not the first run)
        resume = args.resume
        if resume:
            self._checkpoint = torch.load(self._latest_checkpoint_file,
                                          map_location='cpu')
            if self._checkpoint['model_name'] == self._model_name:
                if use_cuda:
                    self._model.to(torch.device('cuda:' + str(devices_ids[0])))
                else:
                    self._model.to(torch.device('cpu'))

                self._model.load_state_dict(self._checkpoint['model'])
                self._optimizer.load_state_dict(self._checkpoint['optimizer'])
                self._basic_mcts_rounds_per_move = self._checkpoint[
                    'basic_mcts_rounds_per_move']

                self._start_game_index = self._checkpoint['game_index']
                self._experience_buffer.data = self._checkpoint[
                    'experience_buffer'].data
                self._logger.debug(
                    'ExpericenceBuffer size is {} when loading from checkpoint'
                    .format(self._experience_buffer.size()))

        writer = SummaryWriter(log_dir='./runs/' + config_name.split('.')[0])

        self._data_collector = DataCollector(encoder, self._model,
                                             az_mcts_rounds_per_move, c_puct,
                                             az_mcts_temperature, board_size,
                                             number_of_planes, devices_ids,
                                             use_cuda)

        self._policy_improver = PolicyImprover(self._model, batch_size, epochs,
                                               devices_ids, use_cuda,
                                               self._optimizer, writer)

        self._policy_checker = PolicyChecker(
            devices_ids, use_cuda, encoder, board_size, number_of_planes,
            self._model, az_mcts_rounds_per_move, c_puct, az_mcts_temperature,
            basic_mcts_c_puct, check_number_of_games, writer)

    def run(self):

        mp.set_start_method('spawn', force=True)
        self._checkpoint['model_name'] = self._model_name

        best_ratio = 0.0
        for game_index in tqdm(range(self._start_game_index,
                                     self._train_number_of_games + 1),
                               desc='Training Loop'):
            self._checkpoint['game_index'] = game_index

            # collect data via self-playing
            collected_data = self._data_collector.collect_data(game_index)
            self._experience_buffer.merge(collected_data)
            self._checkpoint['experience_buffer'] = self._experience_buffer

            # update the policy
            self._policy_improver.improve_policy(game_index,
                                                 self._experience_buffer)
            self._checkpoint['model'] = self._model.state_dict()
            self._checkpoint['optimizer'] = self._optimizer.state_dict()

            # check the policy
            if game_index % self._check_frequence == 0:
                win_ratio = self._policy_checker.check_policy(
                    game_index, self._basic_mcts_rounds_per_move)
                self._checkpoint[
                    'basic_mcts_rounds_per_move'] = self._basic_mcts_rounds_per_move
                self._checkpoint['best_score'] = win_ratio

                # save the latest policy
                torch.save(self._checkpoint, self._latest_checkpoint_file)
                if win_ratio > best_ratio:
                    best_ratio = win_ratio
                    self._logger.info(
                        "New best score {:.2%} against MCTS ({} rounds per move) "
                        .format(win_ratio, self._basic_mcts_rounds_per_move))

                    # save the best_policy
                    torch.save(self._checkpoint, self._best_checkpoint_file)
                    if (best_ratio > 0.8
                            and self._basic_mcts_rounds_per_move < 10000):
                        self._basic_mcts_rounds_per_move += 1000
                        best_ratio = 0.0
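
# For reference, a minimal sketch of a config file matching the keys the
# Trainer above reads (section and option names are taken from the cfg[...]
# calls; the values are illustrative assumptions, not from the original project):
#
#   [GAME]
#   number_of_planes = 8
#   board_size = 9
#   encoder_name = DeepMindEncoder
#
#   [AZ_MCTS]
#   rounds_per_move = 400
#   temperature = 1.0
#   C_puct = 5.0
#
#   [BASIC_MCTS]
#   rounds_per_move = 1000
#   C_puct = 5.0
#
#   [TRAIN]
#   buffer_size = 20000
#   batch_size = 512
#   epochs = 5
#   check_frequence = 50
#   number_of_games = 5000
#
#   [EVALUATE]
#   number_of_games = 10
#
#   [MODELS]
#   net = ResNet8Network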
Example #21
0
class Tablet(object):
    def __init__(self, show=-1):
        self.started = False

        self.condition = ''
        self.fullCondition = '?'

        self.preselectedShow = show

        self.clip = 0
        self.lastClip = 0
        self.conversation = 0

        self.clipLines = []
        self.minLineIdx = 0
        self.maxLineIdx = 0
        self.clipDuration = 0
        self.lineInterval = 0
        self.clipStartTime = 0
        self.currentClip = None
        self.curShow = None
        self.clipQuestion = None

        self.tabletTexture = image.load('img/dashboard-tablet.png').texture
        self.hh = 1.0
        self.ww = 1.0
        self.tabletDisplayText = ''
        self.maxDisplayLength = 10  # max num of lines displayed
        self.tabletLineWidth = 40

        self.allShows = ['show1', 'show2']  # easy / hard
        self.allNumFragments = [32, 34]
        self.shows = self.allShows
        self.numFragments = self.allNumFragments

        self.answers = []

        self.questionOrder = []
        self.questionIndx = 0
        self.correctAnswer = 0

        self.responded = False
        self.playedLetter = False
        self.playedRepeat = False
        self.phase = 0

        self.responseCountdown = 0.0
        self.responseLimit = 15.0
        self.clipInterval = 1.5
        self.lastLineTime = 0.0
        self.isi = 0.0

        self.run = -1
        self.doPractice = False
        self.roadCond = 'simple'

        #self.incorrectSound = pyglet.resource.media('incorrect.wav')
        #self.correctSound = pyglet.resource.media('correct.wav')

        self.db = None

    def setConversation(self, show):
        self.preselectedShow = show

    def clearConversation(self):
        self.preselectedShow = -1

    def startTask(self, cond, driving='?', blockNum=0):
        self.condition = cond
        self.fullCondition = driving + '_' + cond
        self.run += 1

        datacond = '_' + self.roadCond + '_tablet'

        if self.doPractice:
            datacond = '_prac' + datacond

        self.db = DataCollector(
            'Tablet DB',
            'data/' + globals.participant + datacond + '_tablet.dat', [
                'pp', 'cond', 'correct', 'answer', 'conversation', 'question',
                'condtime', 'time'
            ])
        self.db.open()

        self.started = True
        self.pickConversation()

        self.startNextClip()

    def stopTask(self):
        self.started = False

        #if self.player.playing:
        #self.player.next()

        self.db.close()

    def pickConversation(self):
        if self.doPractice:
            self.curShow = 'practice'
            self.lastClip = 8
            self.clip = -1
            print 'Loaded practice show'
        else:
            if self.preselectedShow > -1:
                self.curShow = self.allShows[self.preselectedShow]
                self.lastClip = self.allNumFragments[self.preselectedShow]

                self.clip = -1
                print 'Loaded preselected:' + self.curShow
            else:
                if len(self.shows) > 0:
                    idx = random.sample(range(0, len(self.shows)), 1)[0]
                    self.curShow = self.shows[0]  #self.shows[idx]
                    self.lastClip = self.numFragments[idx]

                    del self.shows[idx]
                    del self.numFragments[idx]

                    self.clip = -1

                    print 'Loaded random:' + self.curShow
                else:
                    print 'No shows left to load'
                    self.started = False

    def startNextClip(self):
        if self.started:
            self.responseCountdown = 0.0
            self.isi = 0.0
            self.responded = False
            self.playedLetter = False
            self.phase = 1
            self.minLineIdx = -1
            self.maxLineIdx = -1
            self.lastLineTime = 0
            self.clipLines = []

            if self.clip + 1 >= self.lastClip:
                #self.pickConversation()
                self.clip = -1
                self.stopTask()
            else:

                self.clip += 1
                #self.clip = 0

                #print 'play clip: ' + str(self.clip+1) + ' of '+ str(self.lastClip)
                with open('tablet/' + str(self.curShow) + '/' +
                          str(self.curShow) + 'frag' + str(self.clip + 1) +
                          '.txt') as f:
                    lines = f.readlines()

                    duration = lines[0]
                    duration = duration.split(':')
                    self.clipDuration = (float(duration[0]) * 60 +
                                         float(duration[1]))
                    #print 'Duration of clip: '+str(self.clipDuration)

                    i = 1
                    while len(lines[i]) > 2:
                        l = lines[i].split(' - ')
                        #print str(len(lines[i]))
                        #print lines[i]
                        #print l[0]
                        #print l[1]
                        self.clipLines.append(ClipLine(l[0], l[1]))
                        i += 1

                    self.lineInterval = self.clipDuration / len(self.clipLines)
                    #print 'Line interval: '+str(self.clipDuration)

                    #print 'Question: '+lines[i+1]
                    self.clipQuestion = Question(lines[i + 1], lines[i + 2],
                                                 lines[i + 3], lines[i + 4])

                self.clipStartTime = globals.currentTime()

                #currentClip = 'radio/'+self.curShow+'/'+self.curShow+'frag'+str(self.clip+1)+'.mp3'
                #self.playAudio(currentClip)

    def updateTabletText(self, dt):
        time = globals.currentTime()
        if time - self.lastLineTime >= self.lineInterval and self.phase < 2:  # time to show the next line on screen
            #print 'Show new line. maxLineIdx from '+str(self.maxLineIdx)+' to '+str(self.maxLineIdx+1)
            #self.minLineIdx += 1
            self.maxLineIdx += 1
            self.lastLineTime = time

            if self.maxLineIdx < len(self.clipLines):
                #print 'Still new lines left to show'
                totalLen = 0  # determine total displayed text length in lines (approx)
                for i in range(self.minLineIdx + 1, self.maxLineIdx + 1):
                    totalLen += self.clipLines[i].lineLen(self.tabletLineWidth)

                #print 'Total length: '+str(totalLen)

                # displayed text will be too long, strip oldest lines until it fits
                if totalLen > self.maxDisplayLength:
                    #print 'Got to remove some lines from the display'
                    difference = totalLen - self.maxDisplayLength
                    while difference > 0:
                        ll = self.clipLines[self.minLineIdx + 1].lineLen(
                            self.tabletLineWidth)
                        difference -= ll
                        self.minLineIdx += 1

                # update displayed text
                self.tabletDisplayText = '<font face="Helvetica,Arial" size=+1>'
                for i in range(self.minLineIdx + 1, self.maxLineIdx + 1):
                    self.tabletDisplayText += self.clipLines[i].output(
                    ) + '<br><br>'

                self.tabletDisplayText += '</font>'

                correctSound = pyglet.resource.media('message.wav')
                correctSound.play()
                #self.tabletText.append(self.clipLines[self.lineIdx].output())

    def clipFinished(self):

        time = globals.currentTime()
        if time - self.clipStartTime < self.clipDuration + self.lineInterval:
            return False

        return True

    def draw(self):
        if self.started:
            yOff = 0

            screenRatio = float(self.hh) / float(self.ww)
            glPushMatrix()
            glBindTexture(GL_TEXTURE_2D, self.tabletTexture.id)
            pyglet.graphics.draw(
                4, pyglet.gl.GL_QUADS,
                ('v3f',
                 (-0.8 * screenRatio, -1.0 + yOff, 0.0, -0.8 * screenRatio,
                  -0.2 + yOff, 0.0, -0.25 * screenRatio, -0.2 + yOff, 0.0,
                  -0.25 * screenRatio, -1.0 + yOff, 0.0)),
                ('t2f', (0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0)))
            glPopMatrix()
            self.set_text()

            displayText = None

            #if self.phase < 2:
            displayText = self.tabletDisplayText
            #else:
            #displayText = self.tabletDisplayQuestion

            #print displayText
            text = pyglet.text.HTMLLabel(
                displayText,
                x=int(0.3 * self.ww),
                y=int(0.17 * self.hh),
                multiline=True,
                width=int(0.48 * self.ww),
                height=(0.3 * self.hh),
                #color=(0,0,0,255),
                anchor_x='center',
                anchor_y='center')
            text.draw()

            self.set_2d()

    def set_2d(self):
        w = self.ww
        h = self.hh
        glDisable(GL_DEPTH_TEST)
        glViewport(0, 0, w, h)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        glOrtho(-1, 1, -1, 1, -1, 1)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

    def set_text(self):
        w = self.ww
        h = self.hh
        glDisable(GL_DEPTH_TEST)
        glViewport(0, 0, w, h)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluOrtho2D(0, w, 0, h)  # gluOrtho2D takes only left, right, bottom, top
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()

    def update(self, dt):
        if self.started:
            if self.phase == 1:
                if not self.clipFinished(
                ) and not self.responded:  # clip is still running on the tablet
                    self.updateTabletText(dt)

                else:  # clip is finished playing
                    self.phase = 2

                    self.buildQuestion()

                    questionSound = pyglet.resource.media('question.wav')
                    questionSound.play()
            elif self.phase == 2:

                self.responseCountdown += dt
                #print self.responseCountdown

                if self.responseCountdown >= self.responseLimit:
                    self.processResponse(-1)

            elif self.phase == 3:  # feedback
                self.isi += dt
                if self.isi >= self.clipInterval:
                    self.startNextClip()

    def buildQuestion(self):
        self.tabletDisplayText = '<font face="Helvetica,Arial" size=+1>'

        self.tabletDisplayText += self.clipQuestion.question + '<br><br>'
        alph = ['A', 'B', 'C']
        self.questionOrder = [0, 1, 2]
        random.shuffle(self.questionOrder)
        self.correctAnswer = self.questionOrder.index(0)

        for i in range(0, 3):
            self.tabletDisplayText += '<b>' + alph[
                i] + '.</b> ' + self.clipQuestion.ans[
                    self.questionOrder[i]] + '<br><br>'

        self.tabletDisplayText += '</font>'

    def processResponse(self, button):

        if self.started and not self.responded and self.phase == 2:
            #self.player.pause()
            #self.player.next()
            correct = 0

            let = ['A', 'B', 'C']

            if button >= 0 and button < 3:
                print 'Answer ' + let[button] + ' was selected. Correct answer was ' + let[self.correctAnswer]
                if button == self.correctAnswer:
                    correct = 1
            else:
                print 'No answer was selected. Correct answer was ' + let[self.correctAnswer]

            if correct:
                correctSound = pyglet.resource.media('correct.wav')
                correctSound.play()
                globals.bonusCounter += globals.TABLET_QUIZ_BONUS
                #self.correctSound.play()
            else:
                incorrectSound = pyglet.resource.media('incorrect.wav')
                incorrectSound.play()
                #self.incorrectSound.play()

            self.phase = 3
            self.responded = True
            self.tabletDisplayText = ''

            self.db.addData([
                'pp', globals.participant, 'cond', self.fullCondition,
                'correct', correct, 'answer', button, 'conversation',
                self.conversation, 'question', self.clip, 'condtime',
                r3(globals.conditionTime()), 'time',
                r3(globals.currentTime())
            ], True)

    def checkInput(self):
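        # map steering-wheel buttons or keyboard keys 1-3 to answers A-C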

        if self.started:
            if not self.responded and self.phase > 1:

                if globals.hasWheel:
                    if globals.joystick.buttons[3]:  #or globals.joystick.buttons[3]:
                        self.processResponse(0)
                    elif globals.joystick.buttons[2]:  #or globals.joystick.buttons[5]:
                        self.processResponse(1)
                    elif globals.joystick.buttons[0]:  #or globals.joystick.buttons[7]:
                        self.processResponse(2)

                if globals.findKey('1') >= 0:
                    self.processResponse(0)
                elif globals.findKey('2') >= 0:
                    self.processResponse(1)
                elif globals.findKey('3') >= 0:
                    self.processResponse(2)

    def setCondition(self, cond):
        self.condition = cond

    def playAudio(self, filename):
        return False

    def audioIsPlaying(self):
        return False
Example #22
0
    def __init__(self, args):
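        # read the training config, build the encoder/model/optimizer, optionally resume from the latest checkpoint, and set up the self-play data collector, policy improver and policy checker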

        self._logger = logging.getLogger('Trainer')
        self._checkpoint = {}

        config_name = args.config

        # hardcode the config path just for convenience.
        cfg = Utils.config("./config/" + config_name)

        number_of_planes = cfg['GAME'].getint('number_of_planes')
        board_size = cfg['GAME'].getint('board_size')
        encoder_name = cfg['GAME'].get('encoder_name')

        az_mcts_rounds_per_move = cfg['AZ_MCTS'].getint('rounds_per_move')
        az_mcts_temperature = cfg['AZ_MCTS'].getfloat('temperature')
        c_puct = cfg['AZ_MCTS'].getfloat('C_puct')

        basic_mcts_c_puct = cfg['BASIC_MCTS'].getfloat('C_puct')

        buffer_size = cfg['TRAIN'].getint('buffer_size')

        batch_size = cfg['TRAIN'].getint('batch_size')

        epochs = cfg['TRAIN'].getint('epochs')

        self._basic_mcts_rounds_per_move = cfg['BASIC_MCTS'].getint(
            'rounds_per_move')
        self._latest_checkpoint_file = './checkpoints/' + config_name.split(
            '.')[0] + '/latest.pth.tar'
        self._best_checkpoint_file = './checkpoints/' + config_name.split(
            '.')[0] + '/best.pth.tar'

        check_number_of_games = cfg['EVALUATE'].getint('number_of_games')

        os.makedirs(os.path.dirname(self._latest_checkpoint_file),
                    exist_ok=True)
        os.makedirs(os.path.dirname(self._best_checkpoint_file), exist_ok=True)

        use_cuda = torch.cuda.is_available()
        devices_ids = []
        if use_cuda:
            devices_ids = list(map(int, args.gpu_ids.split(',')))
            num_devices = torch.cuda.device_count()
            if len(devices_ids) > num_devices:
                raise Exception(
                    '#available gpu : {} < --device_ids : {}'.format(
                        num_devices, len(devices_ids)))

        if encoder_name == 'SnapshotEncoder':
            encoder = SnapshotEncoder(number_of_planes, board_size)
            input_shape = (number_of_planes, board_size, board_size)
        elif encoder_name == 'DeepMindEncoder':
            encoder = DeepMindEncoder(number_of_planes, board_size)
            input_shape = (number_of_planes * 2 + 1, board_size, board_size)
        elif encoder_name == 'BlackWhiteEncoder':
            encoder = BlackWhiteEncoder(number_of_planes, board_size)
            input_shape = (number_of_planes * 2 + 2, board_size, board_size)
        else:
            raise ValueError('Unknown encoder_name: {}'.format(encoder_name))

        self._model_name = cfg['MODELS'].get('net')
        if self._model_name == 'ResNet8Network':
            self._model = ResNet8Network(input_shape, board_size * board_size)
        else:
            self._model = Simple5Network(input_shape, board_size * board_size)

        self._optimizer = Utils.get_optimizer(self._model.parameters(), cfg)

        self._experience_buffer = ExpericenceBuffer(buffer_size)
        self._check_frequence = cfg['TRAIN'].getint('check_frequence')

        self._start_game_index = 1
        self._train_number_of_games = cfg['TRAIN'].getint('number_of_games')

        # resume means this is not the first run of the program; restore state from the latest checkpoint
        resume = args.resume
        if resume:
            self._checkpoint = torch.load(self._latest_checkpoint_file,
                                          map_location='cpu')
            if self._checkpoint['model_name'] == self._model_name:
                if use_cuda:
                    self._model.to(torch.device('cuda:' + str(devices_ids[0])))
                else:
                    self._model.to(torch.device('cpu'))

                self._model.load_state_dict(self._checkpoint['model'])
                self._optimizer.load_state_dict(self._checkpoint['optimizer'])
                self._basic_mcts_rounds_per_move = self._checkpoint[
                    'basic_mcts_rounds_per_move']

                self._start_game_index = self._checkpoint['game_index']
                self._experience_buffer.data = self._checkpoint[
                    'experience_buffer'].data
                self._logger.debug(
                    'ExpericenceBuffer size is {} when loading from checkpoint'
                    .format(self._experience_buffer.size()))

        writer = SummaryWriter(log_dir='./runs/' + config_name.split('.')[0])

        self._data_collector = DataCollector(encoder, self._model,
                                             az_mcts_rounds_per_move, c_puct,
                                             az_mcts_temperature, board_size,
                                             number_of_planes, devices_ids,
                                             use_cuda)

        self._policy_improver = PolicyImprover(self._model, batch_size, epochs,
                                               devices_ids, use_cuda,
                                               self._optimizer, writer)

        self._policy_checker = PolicyChecker(
            devices_ids, use_cuda, encoder, board_size, number_of_planes,
            self._model, az_mcts_rounds_per_move, c_puct, az_mcts_temperature,
            basic_mcts_c_puct, check_number_of_games, writer)
Example #23
0
class Radio(object):

    def __init__(self, show=-1):
        self.started = False

        self.condition = ''
        self.fullCondition = '?'

        self.preselectedShow = show

        self.clip = 0
        self.lastClip = 0
        self.conversation = 0

        self.player = pyglet.media.ManagedSoundPlayer()
        self.llPlayer = None
        self.clipDuration = 0
        self.clipStartTime = 0
        self.currentClip = None
        self.curShow = None

        #sound = pyglet.resource.media(BALL_SOUND, streaming=False)

        #self.hardConvDB = ['show1', 'show2']
        #self.easyConvDB = ['show3', 'show4']
        #self.convAnswers = []
        self.shows = [['show3', 'show4'], ['show1', 'show2']] # easy / hard
        self.numFragments = [[1,1], [32,34]]
        self.answers = []
        #self.convPool = range(len(self.convDB))

        self.questionOrder = []
        self.questionIdx = 0
        self.correctAnswer = 0

        self.responded = False
        self.playedLetter = False
        self.playedRepeat = False
        self.phase = 0

        self.responseCountdown = 0.0
        self.responseLimit = 5.0
        self.clipInterval = 1.5
        self.isi = 0.0

        self.run = -1
        self.doPractice = False
        self.roadCond = 'simple'

        #self.incorrectSound = pyglet.resource.media('incorrect.wav')
        #self.correctSound = pyglet.resource.media('correct.wav')

        self.db = None

    def setConversation(self, show):
        self.preselectedShow = show

    def clearConversation(self):
        self.preselectedShow = -1

    def startTask(self, cond, driving='?', blockNum=0):
        self.condition = cond
        self.fullCondition = driving+'_'+cond
        self.run += 1

        datacond = '_' + self.roadCond + '_hard'

        if self.doPractice and self.run == 0:
            datacond = '_prac' + datacond

        if cond == 'hard':
            self.db = DataCollector('Radio DB',
                                    'data/'+globals.participant+datacond+'_radio.dat',
                                    ['pp', 'cond', 'correct', 'answer', 'conversation',
                                     'question', 'condtime', 'time'])
            self.db.open()

        self.started = True
        self.pickConversation()

        self.startNextClip()

    def stopTask(self):
        self.started = False

        if not self.llPlayer is None:
            self.llPlayer.terminate()
            self.llPlayer = None

        #if self.player.playing:
            #self.player.next()

        if self.condition == 'hard':
            self.db.close()

    def pickConversation(self):
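        # pick an unused show for the current difficulty (or the fixed practice show on the first run) and remove it from the pool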
        cond = 0
        if self.run == 0 and self.doPractice:
            self.curShow = 'practice'
            self.lastClip = 8
            self.clip = -1
        else:
            if self.condition == 'hard':
                cond = 1

            if len(self.shows[cond]) > 0:
                idx = random.sample(range(0,len(self.shows[cond])), 1)[0]
                self.curShow = self.shows[cond][idx]
                self.lastClip = self.numFragments[cond][idx]

                del self.shows[cond][idx]
                del self.numFragments[cond][idx]

                self.clip = -1
            else:
                self.started = False

    def startNextClip(self):
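        # reset per-clip state and play the next fragment; stop the task once all fragments of the current show have been played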
        if self.started:
            self.responseCountdown = 0.0
            self.isi = 0.0
            self.responded = False
            self.playedLetter = False
            self.phase = 0

            if self.clip+1 >= self.lastClip:
                #self.pickConversation()
                self.clip = -1
                self.stopTask()
            else:

                self.clip += 1

                #self.currentClip = pyglet.media.load('radio/'+self.convglobals.db[self.conversation]+'_'+str(self.clip+1)+'.mp3', streaming=False)
                if self.condition == 'hard':
                    currentClip = 'radio/'+self.curShow+'/'+self.curShow+'frag'+str(self.clip+1)+'.mp3'
                    self.playAudio(currentClip)
                else:
                    currentClip = 'radio/'+self.curShow+'/'+self.curShow+'.mp3'
                    self.playAudio(currentClip)
                    #audio_file = os.getcwd()+'/radio/show1/show1frag1q.mp3'
                    #print audio_file

                    #self.playAudio(audio_file)
                    #self.llPlayer = subprocess.Popen(["afplay", audio_file], shell=False)
                    #pid = p.pid()
                    #p.terminate()

    def draw(self):
        return 0

    def update(self, dt):
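        # 'hard' condition phases: 0 = fragment playing, 1 = question and answer options playing, 2 = awaiting response, 3 = feedback; the 'easy' condition just plays the whole show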
        if self.started:
            if self.condition == 'hard':
                #print str(self.player.time)+' - '+str(self.currentClip.duration)
                if not self.audioIsPlaying() and not self.responded:
                    if self.phase == 0: # clip finished, play question
                        currentClip = 'radio/'+self.curShow+'/'+self.curShow+'frag'+str(self.clip+1)+'q.mp3'
                        self.playAudio(currentClip)

                        self.questionOrder = [0, 1, 2]
                        random.shuffle(self.questionOrder)
                        #print self.questionOrder
                        self.questionIdx = 0
                        self.correctAnswer = self.questionOrder.index(0)
                        self.phase = 1

                    elif self.phase == 1: # questions finished, play options
                        if self.questionIdx > 2:
                            # repeat question after answers
                            if self.playedRepeat:
                                currentClip = 'radio/'+self.curShow+'/'+self.curShow+'frag'+str(self.clip+1)+'q.mp3'
                                self.playAudio(currentClip)
                                self.playedRepeat = False

                                self.phase = 2 # options finished, go to response phase

                            else: # say 'repeat'
                                currentClip = 'radio/herhaal.mp3'
                                self.playAudio(currentClip)
                                self.playedRepeat = True
                        else:
                            letters = ['a', 'b', 'c']
                            number = self.questionOrder[self.questionIdx]
                            letter = letters[number]

                            if self.playedLetter:
                                currentClip = 'radio/'+self.curShow+'/'+self.curShow+'frag'+str(self.clip+1)+letter+'.mp3'
                                self.playAudio(currentClip)

                                self.playedLetter = False
                                self.questionIdx += 1
                            else: # play the answer letter clip first
                                currentClip = 'radio/'+letters[self.questionIdx]+'.mp3'
                                self.playAudio(currentClip)

                                self.playedLetter = True
                elif self.phase == 2:
                    self.responseCountdown += dt
                    #print self.responseCountdown

                    if self.responseCountdown >= self.responseLimit:
                        self.processResponse(-1)

                elif self.phase == 3: # feedback
                    self.isi += dt
                    if self.isi >= self.clipInterval:
                        self.startNextClip()
            #else: # easy
                #a = 1
                #if not self.player.playing: # get next clip
                    #print 'finished'
                    #self.startNextClip()

    def processResponse(self, button):

        if self.started and not self.responded and self.phase == 2:
            #self.player.pause()
            #self.player.next()
            correct = 0

            let = ['A', 'B', 'C']

            if button >= 0 and button < 3:
                if self.audioIsPlaying():
                    if self.llPlayer is not None:
                        self.llPlayer.terminate()
                        self.llPlayer = None

                print 'Answer '+let[button]+' was selected. Correct answer was '+let[self.correctAnswer]
                if button ==  self.correctAnswer:
                    correct = 1
            else:
                print 'No answer was selected. Correct answer was '+let[self.correctAnswer]

            if correct:
                correctSound = pyglet.resource.media('correct.wav')
                correctSound.play()
                globals.bonusCounter += globals.RADIO_QUIZ_BONUS
                #self.correctSound.play()
            else:
                incorrectSound = pyglet.resource.media('incorrect.wav')
                incorrectSound.play()
                #self.incorrectSound.play()

            self.phase = 3
            self.responded = True

            self.db.addData(['pp', globals.participant, 'cond', self.fullCondition, 'correct', correct,
                            'answer', button, 'conversation', self.conversation, 'question', self.clip,
                            'condtime', r3(globals.conditionTime()), 'time', r3(globals.currentTime())], True)

    def checkInput(self):

        if self.started:
            if self.condition == 'hard':
                if not self.responded and self.phase > 0:

                    if globals.hasWheel:
                        if globals.joystick.buttons[3]: #or globals.joystick.buttons[3]:
                            self.processResponse(0)
                        elif globals.joystick.buttons[2]: #or globals.joystick.buttons[5]:
                            self.processResponse(1)
                        elif globals.joystick.buttons[0]: #or globals.joystick.buttons[7]:
                            self.processResponse(2)

                    if globals.findKey('1') >= 0:
                        self.processResponse(0)
                    elif globals.findKey('2') >= 0:
                        self.processResponse(1)
                    elif globals.findKey('3') >= 0:
                        self.processResponse(2)

    def setCondition(self, cond):
        self.condition = cond

    def playAudio(self, filename):
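        # load the clip once only to read its duration, then play it in an external process (afplay on OSX, mplayer otherwise) because the pyglet media player is unreliable here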
        #filename2 = 'radio/'+self.curShow+'/'+self.curShow+'frag'+str(self.clip+1)+'.ogg'
        clip = pyglet.media.load(filename, streaming=False)
        self.clipDuration = clip.duration
        #print 'clip duration'
        #print clip.duration
        #audio = MP3(filename2)
        #print 'mutagen'
        #print audio.info.length
        #txt = ''
        #try:
            #subprocess.check_output(["ffmpeg", "-i", filename])
        #except subprocess.CalledProcessError, e:
            ##print "Ping stdout output:\n", e.output
            #txt = e.output

        #ffmpeg -i show1frag1.mp3 2>&1|sed -n "s/.*Duration: \([^,]*\).*/\1/p"
        #print txt
        self.clipStartTime = time.time()
        #print 'clip start time'
        #print self.clipStartTime
        clip = None

        if globals.onOSX:
            # using command prompt because pyglet media player is really buggy...
            self.llPlayer = subprocess.Popen(["afplay", filename], shell=False)
            if self.clipDuration > 5.0:
                print 'Audio PID: '+str(self.llPlayer.pid)+' (kill with "kill <PID>" in bash)'
        else:
            # windows requires mplayer to be installed
            self.llPlayer = subprocess.Popen(["mplayer", filename, "-ss", "30"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    def audioIsPlaying(self):
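        # compare elapsed wall-clock time against the clip duration and terminate the player process once the clip should have ended; note this falls through to True when no player is active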
        if not self.llPlayer is None:
            tm = time.time()
            #print str(tm-self.clipStartTime)+' - '+str(tm)+' - '+str(self.clipStartTime+self.clipDuration)
            if tm-self.clipStartTime >= self.clipDuration:
                #print 'end time'
                #print tm
                if self.llPlayer is not None:
                    if not globals.onOSX:
                        self.llPlayer.stdin.write("q")    
                    self.llPlayer.terminate()
                    self.llPlayer = None

                return False

        return True
            "Logged %skB" % (loggedBytes / 1024)
        ]
        
        print "\n".join(message)
        
        display.write_message(message)

        datafile = "%s/%s.log_v2" % (dataPath, _getCurrentTime())

        loggedBytes += datacollector.write_data_log(
            datafile,
            nbrOfOBDFrames=50000,
            messagesPerTimestamp=50
        )

if __name__=="__main__":

    ## make sure the script is called correctly
    if 2 != len(sys.argv):
        raise OSError("[ERROR] Correct usage:\n  python obd2collector <data directory>")

    dataPath      = sys.argv[1]
    datacollector = DataCollector()
    display       = Display()

    try:
        main(dataPath, datacollector, display)
    except KeyboardInterrupt:
        ## close all threads (hopefully)
        datacollector.shutdown()
        display.shutdown()