Example #1
 def run(self):
     try:
         if VSCODE_DEBUG_QT_THREADS:
             ptvsd.debug_this_thread()
         retval = self.fun()
     except Exception as e:
         self.signals.error.emit(e)
     else:
         self.signals.result.emit(retval)
     finally:
         self.signals.finished.emit()
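
For context, Example #1's run() assumes a QRunnable-style worker with a WorkerSignals object and a VSCODE_DEBUG_QT_THREADS flag defined elsewhere in that project. A minimal, self-contained sketch of that scaffolding (PyQt5 assumed; the class, flag, and constructor below are illustrative reconstructions, not the original project's code) could look like this:

from PyQt5.QtCore import QObject, QRunnable, QThreadPool, pyqtSignal

# Hypothetical flag mirroring the one referenced in Example #1
VSCODE_DEBUG_QT_THREADS = False

class WorkerSignals(QObject):
    # Signal names mirror those emitted in the excerpt above
    finished = pyqtSignal()
    error = pyqtSignal(object)
    result = pyqtSignal(object)

class Worker(QRunnable):
    def __init__(self, fun, *args, **kwargs):
        super().__init__()
        self.fun = fun
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()

    def run(self):
        try:
            if VSCODE_DEBUG_QT_THREADS:
                import ptvsd
                ptvsd.debug_this_thread()  # make this worker thread visible to the attached debugger
            retval = self.fun(*self.args, **self.kwargs)
        except Exception as e:
            self.signals.error.emit(e)
        else:
            self.signals.result.emit(retval)
        finally:
            self.signals.finished.emit()

Such a worker would then typically be started with QThreadPool.globalInstance().start(Worker(some_function)).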
Example #2
    def replay_thread(self):

        # Necessary only for debugging in Visual Studio Code IDE
        try:
            ptvsd.debug_this_thread()
        except:
            pass

        # Check what is in the csv textbox
        csv_name = self.textbox.text()

        # Load experiment history
        history_pd = self.CartPoleInstance.load_history_csv(csv_name=csv_name)

        # Augment the experiment history with simulation time step size
        dt = []
        row_iterator = history_pd.iterrows()
        _, last = next(row_iterator)  # take first item from row_iterator
        for i, row in row_iterator:
            dt.append(row['time'] - last['time'])
            last = row
        dt.append(dt[-1])
        history_pd['dt'] = np.array(dt)

        # Initialize loop timer (with arbitrary dt)
        replay_looper = loop_timer(dt_target=0.0)

        # Start looping over history
        replay_looper.start_loop()
        for index, row in history_pd.iterrows():
            self.CartPoleInstance.s.position = row['s.position']
            self.CartPoleInstance.s.positionD = row['s.positionD']
            self.CartPoleInstance.s.angle = row['s.angle']
            self.CartPoleInstance.time = row['time']
            self.CartPoleInstance.dt = row['dt']
            self.CartPoleInstance.u = row['u']
            self.CartPoleInstance.Q = row['Q']
            self.CartPoleInstance.target_position = row['target_position']
            self.CartPoleInstance.slider_value = self.CartPoleInstance.target_position

            dt_target = (self.CartPoleInstance.dt / self.speedup)
            replay_looper.dt_target = dt_target

            replay_looper.sleep_leftover_time()

            if self.terminate_experiment_or_replay_thread:  # Means that stop button was pressed
                break

        if self.show_experiment_summary:
            self.CartPoleInstance.dict_history = history_pd.loc[:index].to_dict(
                orient='list')

        self.experiment_or_replay_thread_terminated = True
Example #3
    def run(self):
        logger.debug('Prepare inference')

        try:
            import ptvsd
            ptvsd.debug_this_thread()
        except:
            pass

        training_folder = self.data['training_folder']
        input_image_file = self.data['input_image_file']

        config_file = os.path.join(training_folder,
                                   Training.config('config_file'))

        network = Network()
        network.setAbortable(self.abortable)
        network.setThread(self.thread)

        network_config = network.loadConfig(config_file)
        self.thread.data.emit({'network_config': network_config})

        architecture_file = ''
        weights_file = ''
        files = network_config.files
        for f in files:
            if '.json' in f:
                architecture_file = os.path.join(training_folder, f)
            elif '.params' in f:
                weights_file = os.path.join(training_folder, f)

        dataset_folder = network_config.dataset

        inference_data = Map({
            'input_image_file': input_image_file,
            'architecture_file': architecture_file,
            'weights_file': weights_file,
            'labels': network_config.labels,
        })

        self.thread.update.emit(_('Validating ...'), 1, 3)

        network.inference(inference_data.input_image_file,
                          inference_data.labels,
                          inference_data.architecture_file,
                          inference_data.weights_file,
                          args=None)

        self.thread.update.emit(_('Finished'), 3, 3)
Example #4
 def run(self):
     # This try/except is required to debug threads with VS Code - it should be removed in release
     try:
         import ptvsd
         ptvsd.debug_this_thread()
     except:
         pass
     if self._passed_arguments and not self._kwargs:
         self._passed_method(self._passed_arguments)
     elif not self._passed_arguments and self._kwargs:
         self._passed_method(**self._kwargs)
     elif self._passed_arguments and self._kwargs:
         self._passed_method(self._passed_arguments, **self._kwargs)
     else:
         self._passed_method()
Example #5
 def run(self):
     import ptvsd
     ptvsd.debug_this_thread()
     sth = getFlowInfo(self.packet)
     target = []
     for i in sth['Flow']:
         i.attrforDL()
         target.append(preprocess(i.features))
     result = predict(target)
     with open('result.txt', 'w') as outfile:
         for i in range(0, len(sth['Flow'])):
             outfile.write(sth['Flow'][i].flowID + ',[' +
                           str(result[i][0][0]) + ' ' +
                           str(result[i][0][1]) + ' ' +
                           str(result[i][0][2]) + ' ' +
                           str(result[i][0][3]) + ' ' +
                           str(result[i][0][4]) + ']\n')
     self.signal.doneSignal.emit()
Example #6
    def experiment_thread(self):

        # Necessary only for debugging in Visual Studio Code IDE
        try:
            ptvsd.debug_this_thread()
        except:
            pass

        self.looper.start_loop()
        while not self.terminate_experiment_or_replay_thread:
            if self.pause_experiment_or_replay_thread:
                time.sleep(0.1)
            else:
                # Calculations of the Cart state in the next timestep
                self.CartPoleInstance.update_state()

                # Terminate thread if random experiment reached its maximal length
                if (self.CartPoleInstance.use_pregenerated_target_position
                        and self.CartPoleInstance.time >= self.CartPoleInstance.t_max_pre):
                    self.terminate_experiment_or_replay_thread = True

                # FIXME: when Speedup is left empty in the GUI I expected infinite speedup, but got the error 'Loop timer was not initialized properly'
                self.looper.sleep_leftover_time()

        # Save simulation history if user chose to do so at the end of the simulation
        if self.save_history:
            csv_name = self.textbox.text()
            self.CartPoleInstance.save_history_csv(
                csv_name=csv_name,
                mode='init',
                length_of_experiment=np.around(
                    self.CartPoleInstance.dict_history['time'][-1],
                    decimals=2))
            self.CartPoleInstance.save_history_csv(csv_name=csv_name,
                                                   mode='save offline')

        self.experiment_or_replay_thread_terminated = True
Example #7
 def foo(x):
     ptvsd.debug_this_thread()
     event.set()  # @bp
     return 0
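
Example #7 appears to come from a debugger test: `event` is presumably a threading.Event defined in the surrounding module, and the `# @bp` marker indicates where a breakpoint is expected. A hedged sketch of a driver for it (the event and the thread setup below are assumptions added for illustration, not part of the original test):

import threading

event = threading.Event()  # assumed counterpart of the `event` used inside foo()

worker = threading.Thread(target=foo, args=(0,))
worker.start()

# Block until foo() has signalled the event, then reclaim the thread.
event.wait()
worker.join()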
Example #8
    def replay_thread(self):

        # Necessary only for debugging in Visual Studio Code IDE
        try:
            ptvsd.debug_this_thread()
        except:
            pass

        # Check what is in the csv textbox
        csv_name = self.textbox.text()

        # Load experiment history
        history_pd, filepath = self.CartPoleInstance.load_history_csv(
            csv_name=csv_name)

        # Set cartpole in the right mode (just to ensure slider behaves properly)
        with open(filepath, newline='') as f:
            reader = csv.reader(f)
            for line in reader:
                line = line[0]
                if line[:len('# Controller: ')] == '# Controller: ':
                    controller_set = self.CartPoleInstance.set_controller(
                        line[len('# Controller: '):].rstrip("\n"))
                    if controller_set:
                        self.rbs_controllers[self.CartPoleInstance.controller_idx].setChecked(True)
                    else:
                        # Select the first controller which is not manual stabilization
                        self.rbs_controllers[1].setChecked(True)
                    break

        # Augment the experiment history with simulation time step size
        dt = []
        row_iterator = history_pd.iterrows()
        _, last = next(row_iterator)  # take first item from row_iterator
        for i, row in row_iterator:
            dt.append(row['time'] - last['time'])
            last = row
        dt.append(dt[-1])
        history_pd['dt'] = np.array(dt)

        # Initialize loop timer (with arbitrary dt)
        replay_looper = loop_timer(dt_target=0.0)

        # Start looping over history
        replay_looper.start_loop()
        global L
        for index, row in history_pd.iterrows():
            self.CartPoleInstance.s[POSITION_IDX] = row['position']
            self.CartPoleInstance.s[POSITIOND_IDX] = row['positionD']
            self.CartPoleInstance.s[ANGLE_IDX] = row['angle']
            self.CartPoleInstance.time = row['time']
            self.CartPoleInstance.dt = row['dt']
            try:
                self.CartPoleInstance.u = row['u']
            except KeyError:
                pass
            self.CartPoleInstance.Q = row['Q']
            self.CartPoleInstance.target_position = row['target_position']
            if self.CartPoleInstance.controller_name == 'manual-stabilization':
                self.CartPoleInstance.slider_value = self.CartPoleInstance.Q
            else:
                self.CartPoleInstance.slider_value = self.CartPoleInstance.target_position / TrackHalfLength

            # TODO: Make it more general for all possible parameters
            try:
                L[...] = row['L']
            except KeyError:
                pass
            except:
                print('Error while assigning L')
                print("Unexpected error:", sys.exc_info()[0])
                print("Unexpected error:", sys.exc_info()[1])

            dt_target = (self.CartPoleInstance.dt / self.speedup)
            replay_looper.dt_target = dt_target

            replay_looper.sleep_leftover_time()

            if self.terminate_experiment_or_replay_thread:  # Means that stop button was pressed
                break

            while self.pause_experiment_or_replay_thread:  # Means that pause button was pressed
                time.sleep(0.1)

        if self.show_experiment_summary:
            self.CartPoleInstance.dict_history = history_pd.loc[:index].to_dict(
                orient='list')

        self.experiment_or_replay_thread_terminated = True
Example #9
    def run(self):
        logger.debug('Prepare training')

        try:
            import ptvsd
            ptvsd.debug_this_thread()
        except:
            pass

        network_key = self.data['network']
        if network_key not in Training.config('objects'):
            self.thread.message.emit(
                _('Training'),
                _('Network {} could not be found').format(network_key),
                MessageType.Error)
            self.abort()
            return

        # Training settings
        gpus = []
        gpu_checkboxes = self.data['gpu_checkboxes']
        for i, gpu in enumerate(gpu_checkboxes):
            if gpu.checkState() == Qt.Checked:
                gpus.append(str(i))
        gpus = ','.join(gpus)
        epochs = int(self.data['args_epochs'])
        batch_size = int(self.data['args_batch_size'])

        # Dataset
        dataset_format = self.data['selected_format']
        train_dataset_obj = Export.config('objects')[dataset_format]()
        train_dataset_obj.setInputFolderOrFile(self.data['train_dataset'])
        if self.data['val_dataset']:
            val_dataset_obj = Export.config('objects')[dataset_format]()
            val_dataset_obj.setInputFolderOrFile(self.data['val_dataset'])

        labels = train_dataset_obj.getLabels()
        num_train_samples = train_dataset_obj.getNumSamples()
        num_batches = int(math.ceil(num_train_samples / batch_size))

        args = Map({
            'network': self.data['network'],
            'train_dataset': self.data['train_dataset'],
            'validate_dataset': self.data['val_dataset'],
            'training_name': self.data['training_name'],
            'batch_size': batch_size,
            'learning_rate': float(self.data['args_learning_rate']),
            'gpus': gpus,
            'epochs': epochs,
            'early_stop_epochs': int(self.data['args_early_stop_epochs']),
            'start_epoch': self.data['start_epoch'],
            'resume': self.data['resume_training'],
        })

        self.thread.update.emit(_('Loading data ...'), 0,
                                epochs * num_batches + 5)

        with Training.config('objects')[network_key]() as network:
            network.setAbortable(self.abortable)
            network.setThread(self.thread)
            network.setArgs(args)
            network.setOutputFolder(self.data['output_folder'])
            network.setTrainDataset(train_dataset_obj, dataset_format)
            network.setLabels(labels)

            if self.data['val_dataset']:
                network.setValDataset(val_dataset_obj)

            self.checkAborted()

            network.training()
Example #10
import bpy
from bpy_extras.io_utils import ExportHelper

from .BlenderVisitor import BlenderVisitor

# Taken from https://github.com/Microsoft/PTVS/wiki/Cross-Platform-Remote-Debugging
# Now moved to https://docs.microsoft.com/en-us/visualstudio/python/debugging-cross-platform-remote
# Project repository at https://github.com/Microsoft/ptvsd
# Install latest version from pypi at https://pypi.org/project/ptvsd/
#
# From Visual Studio 2019 or Visual Studio Code: attach to the PTVS Python Remote debuggee using "tcp://localhost:5678" (no secret!)
try:
    import ptvsd
except Exception:
    print('PTVS Debugging disabled: import ptvsd failed')

try:
    # according to https://code.visualstudio.com/docs/python/debugging#_troubleshooting
    ptvsd.debug_this_thread()
except Exception:
    print('PTVS Debugging disabled: ptvsd.debug_this_thread() failed')

try:
    # ptvsd.enable_attach(secret=None): with ptvsd version 4 and upwards, secret is no longer a named parameter
    ptvsd.enable_attach()
    print('PTVS Debugging enabled')
except Exception as e:
    print('PTVS Debugging disabled: ptvsd.enable_attach failed:')
    print(e)
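
# Editor's sketch (not part of the original add-on): if the exporter should pause
# until a debugger has actually attached, ptvsd also provides wait_for_attach().
# The environment-variable guard below is a hypothetical opt-in switch so normal
# Blender runs are unaffected; treat this block as an assumed, optional addition.
import os

try:
    if os.environ.get('FUS_WAIT_FOR_DEBUGGER'):  # hypothetical opt-in switch
        ptvsd.wait_for_attach()
        print('PTVS debugger attached')
except Exception as e:
    print('PTVS Debugging: wait_for_attach skipped:')
    print(e)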


class ExportFUS(bpy.types.Operator, ExportHelper):
    # Class attributes
    bl_idname = "export_scene.fus"
Example #11
    def run(self):
        logger.debug('Prepare import')

        try:
            import ptvsd
            ptvsd.debug_this_thread()
        except:
            pass

        data_folder_or_file = self.data['data_folder']
        is_data_folder_valid = True
        if not data_folder_or_file:
            is_data_folder_valid = False
        data_folder_or_file = os.path.normpath(data_folder_or_file)
        if not (os.path.isdir(data_folder_or_file)
                or os.path.isfile(data_folder_or_file)):
            is_data_folder_valid = False
        if not is_data_folder_valid:
            self.thread.message.emit(
                _('Import'), _('Please enter a valid dataset file or folder'),
                MessageType.Warning)
            self.abort()
            return

        output_folder = self.data['output_folder']
        is_output_folder_valid = True
        if not output_folder:
            is_output_folder_valid = False
        output_folder = os.path.normpath(output_folder)
        if not os.path.isdir(output_folder):
            is_output_folder_valid = False
        if not is_output_folder_valid:
            self.thread.message.emit(_('Import'),
                                     _('Please enter a valid output folder'),
                                     MessageType.Warning)
            self.abort()
            return

        selected_format = self.data['selected_format']
        all_formats = Export.config('formats')
        inv_formats = Export.invertDict(all_formats)
        if selected_format not in inv_formats:
            self.thread.message.emit(
                _('Import'),
                _('Import format {} could not be found').format(
                    selected_format), MessageType.Warning)
            self.abort()
            return
        else:
            self.data['format_name'] = inv_formats[selected_format]
        format_name = self.data['format_name']

        # Dataset
        dataset_format = Export.config('objects')[format_name]()
        if not dataset_format.isValidFormat(data_folder_or_file):
            self.thread.message.emit(_('Import'), _('Invalid dataset format'),
                                     MessageType.Warning)
            self.abort()
            return

        dataset_format.setAbortable(self.abortable)
        dataset_format.setThread(self.thread)
        dataset_format.setOutputFolder(output_folder)
        dataset_format.setInputFolderOrFile(data_folder_or_file)

        self.checkAborted()

        dataset_format.importFolder()
Example #12
    def run(self):
        logger.debug('Start import from directory')

        try:
            import ptvsd
            ptvsd.debug_this_thread()
        except:
            pass

        data = Map(self.data)
        num_images = len(data.images)
        pattern = data.pattern
        output_dir = data.output_dir
        filters = data.filters

        filter_label_func = self.acceptAll
        if 'label' in filters and not filters['label'] == StatisticsModel.STATISTICS_FILTER_ALL:
            filter_label_func = self.acceptLabel

        image_count = 0
        all_shapes = []
        items = []

        self.checkAborted()

        for i, filename in enumerate(data.images):

            self.thread.update.emit(None, i, num_images)
            self.checkAborted()

            # Search pattern
            if pattern and pattern.lower() not in filename.lower(): # re.search(pattern, filename, re.IGNORECASE) == None:
                continue

            label_file = os.path.splitext(filename)[0] + '.json'
            if output_dir:
                label_file_without_path = os.path.basename(label_file)
                label_file = os.path.normpath(os.path.join(output_dir, label_file_without_path))

            # ListItem
            item = QtWidgets.QListWidgetItem(filename)
            item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
            item.setCheckState(Qt.Unchecked)

            self.checkAborted()

            shapes = []
            has_labels = False
            labels_for_image = set([])
            label_file_exists = os.path.isfile(label_file)

            # Labels
            if label_file_exists:
                labelFile = LabelFile(label_file)
                for label, points, line_color, fill_color, shape_type, flags in labelFile.shapes:
                    if filter_label_func(label):
                        has_labels = True
                        shape = Shape(label=label, shape_type=shape_type)
                        shapes.append(shape)
                        labels_for_image.add(label)

            # Filters
            if 'label' in filters and not filters['label'] == StatisticsModel.STATISTICS_FILTER_ALL:
                if not filters['label'] in labels_for_image:
                    continue
            if 'has_label' in filters:
                if filters['has_label'] == StatisticsModel.STATISTICS_FILTER_LABELED and not has_labels:
                    continue
                if filters['has_label'] == StatisticsModel.STATISTICS_FILTER_UNLABELED and has_labels:
                    continue

            image_count += 1
            items.append(item)
            if has_labels:
                item.setCheckState(Qt.Checked)
                all_shapes.append(shapes)

            if image_count % data['update_interval'] == 0:
                self.thread.data.emit({
                    'items': items,
                    'num_images': image_count,
                    'all_shapes': all_shapes,
                })
                image_count = 0
                all_shapes = []
                items = []

            self.checkAborted()

        self.thread.data.emit({
            'num_images': image_count,
            'all_shapes': all_shapes,
            'items': items,
        })
Example #13
    def run(self):
        logger.debug('Prepare export')

        try:
            import ptvsd
            ptvsd.debug_this_thread()
        except:
            pass

        data_folder = self.data['data_folder']
        is_data_folder_valid = True
        if not data_folder:
            is_data_folder_valid = False
        data_folder = os.path.normpath(data_folder)
        if not os.path.isdir(data_folder):
            is_data_folder_valid = False
        if not is_data_folder_valid:
            self.thread.message.emit(_('Export'),
                                     _('Please enter a valid data folder'),
                                     MessageType.Warning)
            self.abort()
            return

        export_folder = self.data['export_folder']
        is_export_folder_valid = True
        if not export_folder:
            is_export_folder_valid = False
        export_folder = os.path.normpath(export_folder)
        if not os.path.isdir(export_folder):
            is_export_folder_valid = False
        if not is_export_folder_valid:
            self.thread.message.emit(_('Export'),
                                     _('Please enter a valid export folder'),
                                     MessageType.Warning)
            self.abort()
            return

        selected_labels = self.data['selected_labels']
        num_selected_labels = len(selected_labels)
        limit = self.data['max_num_labels']
        if num_selected_labels > limit:
            self.thread.message.emit(
                _('Export'),
                _('Please select a maximum of {} labels').format(limit),
                MessageType.Warning)
            self.abort()
            return
        elif num_selected_labels <= 0:
            self.thread.message.emit(_('Export'),
                                     _('Please select at least 1 label'),
                                     MessageType.Warning)
            self.abort()
            return

        dataset_name = replace_special_chars(self.data['dataset_name'])
        if not dataset_name:
            self.thread.message.emit(_('Export'),
                                     _('Please enter a valid dataset name'),
                                     MessageType.Warning)
            self.abort()
            return

        export_dataset_folder = os.path.normpath(
            os.path.join(self.data['export_folder'],
                         self.data['dataset_name']))
        if not os.path.isdir(export_dataset_folder):
            os.makedirs(export_dataset_folder)
        elif len(os.listdir(export_dataset_folder)) > 0:
            msg = _(
                'The selected output directory "{}" is not empty. All files it contains will be deleted. Are you sure you want to continue?'
            ).format(export_dataset_folder)
            if self.doConfirm(_('Export'), msg, MessageType.Warning):
                deltree(export_dataset_folder)
                time.sleep(0.5)  # wait for deletion to be finished
                if not os.path.exists(export_dataset_folder):
                    os.makedirs(export_dataset_folder)
            else:
                self.abort()
                return

        if not os.path.isdir(export_dataset_folder):
            self.thread.message.emit(
                _('Export'),
                _('The selected output directory "{}" could not be created').
                format(export_dataset_folder), MessageType.Warning)
            self.abort()
            return

        selected_format = self.data['selected_format']
        all_formats = Export.config('formats')
        inv_formats = Export.invertDict(all_formats)
        if selected_format not in inv_formats:
            self.thread.message.emit(
                _('Export'),
                _('Export format {} could not be found').format(
                    selected_format), MessageType.Warning)
            self.abort()
            return
        else:
            self.data['format_name'] = inv_formats[selected_format]

        logger.debug('Start export')

        selected_labels = self.data['selected_labels']
        validation_ratio = self.data['validation_ratio']
        data_folder = self.data['data_folder']
        format_name = self.data['format_name']

        self.checkAborted()

        intermediate = IntermediateFormat()
        intermediate.setAbortable(self.abortable)
        intermediate.setThread(self.thread)
        intermediate.setIncludedLabels(selected_labels)
        intermediate.setValidationRatio(validation_ratio)
        intermediate.addFromLabelFiles(data_folder, shuffle=False)

        self.thread.update.emit(_('Loading data ...'), 0,
                                intermediate.getNumberOfSamples() + 5)

        args = Map({
            'validation_ratio': validation_ratio,
        })

        dataset_format = Export.config('objects')[format_name]()
        dataset_format.setAbortable(self.abortable)
        dataset_format.setThread(self.thread)
        dataset_format.setIntermediateFormat(intermediate)
        dataset_format.setInputFolderOrFile(data_folder)
        dataset_format.setOutputFolder(export_dataset_folder)
        dataset_format.setArgs(args)

        self.checkAborted()

        dataset_format.export()