Example #1
def getclusting():
    import sys, subprocess
    exefilepath = '/home/mayang/mxnet/example/image-classification/clusting/clusting.py'
    args = [sys.executable, exefilepath]
    p = subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    x = ''
    while p.poll() is None:
        for line in utils.nonblocking_readlines(p.stdout):
            if line is not None:
                x = x + line
    x = x.rstrip('\n')
    lines = x.split('\n')
    l = len(lines)
    print l
    # show a small sample of the output for debugging
    for i in range(min(3, l)):
        print lines[i]

    return flask.render_template(
        'models/images/classification/clustinginfo.html', clusting=lines, l=l)
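
Note: `utils.nonblocking_readlines` is used throughout these examples but never shown. A minimal sketch of one possible implementation for POSIX systems, assuming Python 2 string semantics (the actual DIGITS helper may differ):

import fcntl
import os

def nonblocking_readlines(f):
    """Yield lines from f without blocking; yield None when no line is ready."""
    fd = f.fileno()
    # put the underlying descriptor into non-blocking mode
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    buf = ''
    while True:
        try:
            chunk = os.read(fd, 1024)
        except OSError:
            # EAGAIN: no data available yet (a real implementation would check errno)
            yield None
            continue
        if not chunk:
            # EOF: flush whatever is buffered, then stop
            if buf:
                yield buf
            return
        buf += chunk
        while '\n' in buf:
            line, buf = buf.split('\n', 1)
            yield line + '\n'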
Example #2
def getnodesinfo():
    import sys, subprocess
    exefilepath = '/home/mayang/mxnet/example/image-classification/clusting/get_nodes.py'
    args = [sys.executable, exefilepath]
    p = subprocess.Popen(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    x = ''
    while p.poll() is None:
        for line in utils.nonblocking_readlines(p.stdout):
            if line is not None:
                x = x + line
    x = x.rstrip('\n')
    node_row = x.split('\n')
    node_dim = []
    for row in node_row:
        node_dim.extend(row.split(','))
    # the helper script appears to emit ten comma-separated fields per node,
    # hence the division by 10
    l = len(node_dim) / 10
    print node_dim
    return flask.render_template('models/images/classification/nodeinfo.html',
                                 list=node_dim,
                                 l=l)
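
Since `l = len(node_dim) / 10` implies ten fields per node, the flat list could also be regrouped into per-node rows before rendering. A sketch; the field count of 10 is inferred from the code above, not confirmed:

def group_node_fields(node_dim, fields_per_node=10):
    """Split the flat field list into one row per node."""
    return [node_dim[i:i + fields_per_node]
            for i in range(0, len(node_dim), fields_per_node)]

# usage: rows = group_node_fields(node_dim); each row holds one node's fields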
Example #3
    def infer_one_image(self, image, snapshot_epoch=None, layers=None, gpu=None):
        """
        Classify an image
        Returns (predictions, visualizations)
            predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence
            visualizations -- an array of (layer_name, activations, weights) for the specified layers
        Returns (None, None) if something goes wrong

        Arguments:
        image -- a np.array

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        layers -- which layer activation[s] and weight[s] to visualize
        """
        temp_image_handle, temp_image_path = tempfile.mkstemp(suffix='.png')
        os.close(temp_image_handle)
        image = PIL.Image.fromarray(image)
        try:
            image.save(temp_image_path, format='png')
        except KeyError:
            error_message = 'Unable to save file to "%s"' % temp_image_path
            self.logger.error(error_message)
            raise digits.inference.errors.InferenceError(error_message)

        if config_value('torch_root') == '<PATHS>':
            torch_bin = 'th'
        else:
            torch_bin = os.path.join(config_value('torch_root'), 'bin', 'th')

        file_to_load = self.get_snapshot(snapshot_epoch)

        args = [torch_bin,
                os.path.join(os.path.dirname(os.path.dirname(digits.__file__)), 'tools', 'torch', 'wrapper.lua'),
                'test.lua',
                '--image=%s' % temp_image_path,
                '--network=%s' % self.model_file.split(".")[0],
                '--networkDirectory=%s' % self.job_dir,
                '--snapshot=%s' % file_to_load,
                '--allPredictions=yes',
                ]
        if hasattr(self.dataset, 'labels_file'):
            args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))

        if self.use_mean != 'none':
            filename = self.create_mean_file()
            args.append('--mean=%s' % os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE))

        if self.use_mean == 'pixel':
            args.append('--subtractMean=pixel')
        elif self.use_mean == 'image':
            args.append('--subtractMean=image')
        else:
            args.append('--subtractMean=none')

        if self.crop_size:
            args.append('--crop=yes')
            args.append('--croplen=%d' % self.crop_size)

        if layers == 'all':
            args.append('--visualization=yes')
            args.append('--save=%s' % self.job_dir)

        # Convert them all to strings
        args = [str(x) for x in args]

        regex = re.compile(r'\x1b\[[0-9;]*m', re.UNICODE)  # TODO: need to include a regular expression for Mac color codes
        self.logger.info('%s classify one task started.' % self.get_framework_id())

        unrecognized_output = []
        predictions = []
        self.visualization_file = None

        env = os.environ.copy()

        if gpu is not None:
            args.append('--type=cuda')
            # make only the selected GPU visible
            env['CUDA_VISIBLE_DEVICES'] = "%d" % gpu
        else:
            args.append('--type=float')

        p = subprocess.Popen(args,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd=self.job_dir,
                close_fds=True,
                env=env,
                )

        try:
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    if self.aborted.is_set():
                        p.terminate()
                        raise digits.inference.errors.InferenceError('%s classify one task got aborted. error code - %d' % (self.get_framework_id(), p.returncode))

                    if line is not None:
                        # Remove color codes and whitespace
                        line = regex.sub('', line).strip()
                    if line:
                        if not self.process_test_output(line, predictions, 'one'):
                            self.logger.warning('%s classify one task unrecognized input: %s' % (self.get_framework_id(), line.strip()))
                            unrecognized_output.append(line)
                    else:
                        time.sleep(0.05)

        except Exception as e:
            if p.poll() is None:
                p.terminate()
            error_message = ''
            if type(e) == digits.inference.errors.InferenceError:
                error_message = e.__str__()
            else:
                error_message = '%s classify one task failed with error code %d \n %s' % (self.get_framework_id(), p.returncode, str(e))
            self.logger.error(error_message)
            if unrecognized_output:
                unrecognized_output = '\n'.join(unrecognized_output)
                error_message = error_message + unrecognized_output
            raise digits.inference.errors.InferenceError(error_message)

        finally:
            self.after_test_run(temp_image_path)

        if p.returncode != 0:
            error_message = '%s classify one task failed with error code %d' % (self.get_framework_id(), p.returncode)
            self.logger.error(error_message)
            if unrecognized_output:
                unrecognized_output = '\n'.join(unrecognized_output)
                error_message = error_message + unrecognized_output
            raise digits.inference.errors.InferenceError(error_message)
        else:
            self.logger.info('%s classify one task completed.' % self.get_framework_id())

        predictions = {'output': np.array(predictions)}

        visualizations = []

        if layers == 'all' and self.visualization_file:
            vis_db = h5py.File(self.visualization_file, 'r')
            # the HDF5 database is organized as follows:
            # <root>
            # |- layers
            #    |- 1
            #    |  |- name
            #    |  |- activations
            #    |  |- weights
            #    |- 2
            for layer_id, layer in vis_db['layers'].items():
                layer_desc = layer['name'][...].tostring()
                if 'Sequential' in layer_desc or 'Parallel' in layer_desc:
                    # ignore containers
                    continue
                idx = int(layer_id)
                # activations
                if 'activations' in layer:
                    data = np.array(layer['activations'][...])
                    # skip batch dimension
                    if len(data.shape) > 1 and data.shape[0] == 1:
                        data = data[0]
                    vis = utils.image.get_layer_vis_square(data)
                    mean, std, hist = self.get_layer_statistics(data)
                    visualizations.append(
                        {
                            'id': idx,
                            'name': layer_desc,
                            'vis_type': 'Activations',
                            'vis': vis,
                            'data_stats': {
                                'shape': data.shape,
                                'mean': mean,
                                'stddev': std,
                                'histogram': hist,
                            }
                        }
                    )
                # weights
                if 'weights' in layer:
                    data = np.array(layer['weights'][...])
                    if 'Linear' not in layer_desc:
                        vis = utils.image.get_layer_vis_square(data)
                    else:
                        # Linear (inner product) layers have too many weights
                        # to display
                        vis = None
                    mean, std, hist = self.get_layer_statistics(data)
                    parameter_count = reduce(operator.mul, data.shape, 1)
                    if 'bias' in layer:
                        bias = np.array(layer['bias'][...])
                        parameter_count += reduce(operator.mul, bias.shape, 1)
                    visualizations.append(
                        {
                            'id': idx,
                            'name': layer_desc,
                            'vis_type': 'Weights',
                            'vis': vis,
                            'param_count': parameter_count,
                            'data_stats': {
                                'shape': data.shape,
                                'mean': mean,
                                'stddev': std,
                                'histogram': hist,
                            }
                        }
                    )
            # sort by layer ID
            visualizations = sorted(visualizations, key=lambda x: x['id'])
        return (predictions, visualizations)
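
The HDF5 layout described in the comments above can be inspected outside DIGITS as well. A minimal reader, assuming a file with the `layers/<id>/{name,activations,weights}` structure shown:

import h5py
import numpy as np

def dump_visualization_db(path):
    """Print the stored name and array shapes for each layer, in layer order."""
    with h5py.File(path, 'r') as vis_db:
        for layer_id, layer in sorted(vis_db['layers'].items(),
                                      key=lambda item: int(item[0])):
            name = layer['name'][...].tostring()
            for key in ('activations', 'weights'):
                if key in layer:
                    data = np.array(layer[key][...])
                    print('%s %s %s %s' % (layer_id, name, key, data.shape))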
Example #4
    def get_network_visualization(self, desc):
        """
        return visualization of network
        """
        # save network description to temporary file
        temp_network_handle, temp_network_path = tempfile.mkstemp(suffix='.lua')
        os.write(temp_network_handle, desc)
        os.close(temp_network_handle)

        try: # do this in a try..finally clause to make sure we delete the temp file
            # build command line
            if config_value('torch_root') == '<PATHS>':
                torch_bin = 'th'
            else:
                torch_bin = os.path.join(config_value('torch_root'), 'bin', 'th')

            args = [torch_bin,
                    os.path.join(os.path.dirname(os.path.dirname(digits.__file__)), 'tools', 'torch', 'main.lua'),
                    '--network=%s' % os.path.splitext(os.path.basename(temp_network_path))[0],
                    '--networkDirectory=%s' % os.path.dirname(temp_network_path),
                    '--subtractMean=none', # we are not providing a mean image
                    '--visualizeModel=yes',
                    '--type=float'
                    ]

            # execute command
            p = subprocess.Popen(args,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT,
                        close_fds=True,
                        )

            regex = re.compile(r'\x1b\[[0-9;]*m', re.UNICODE)  # TODO: need to include a regular expression for Mac color codes

            # the network description will be accumulated from the command output
            # when collecting_net_definition==True
            collecting_net_definition = False
            desc = []
            unrecognized_output = []
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    if line is not None:
                        # Remove whitespace and color codes. Color codes are appended
                        # to the beginning and end of each line by the torch binary,
                        # i.e. 'th'. See the link below for more information:
                        # https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ
                        line = regex.sub('', line)
                        timestamp, level, message = TorchTrainTask.preprocess_output_torch(line.strip())
                        if message:
                            if message.startswith('Network definition'):
                                collecting_net_definition = not collecting_net_definition
                        else:
                            if collecting_net_definition:
                                desc.append(line)
                            elif len(line):
                                unrecognized_output.append(line)
                    else:
                        time.sleep(0.05)

            if not desc:
                # we did not find a network description
                raise NetworkVisualizationError(''.join(unrecognized_output))
            else:
                output = flask.Markup('<pre>')
                for line in desc:
                    output += flask.Markup.escape(line)
                output += flask.Markup('</pre>')
                return output
        finally:
            os.remove(temp_network_path)
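
The regex used above strips the standard ANSI SGR color sequences that 'th' emits. A quick standalone check of what it removes:

import re

ANSI_SGR = re.compile(r'\x1b\[[0-9;]*m')

line = '\x1b[1;32mNetwork definition\x1b[0m\n'
print(ANSI_SGR.sub('', line).strip())  # -> 'Network definition'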
Example #5
    def classify_many(self, images, snapshot_epoch=None):
        """
        Returns (labels, results):
        labels -- an array of strings
        results -- a 2D np array:
            [
                [image0_label0_confidence, image0_label1_confidence, ...],
                [image1_label0_confidence, image1_label1_confidence, ...],
                ...
            ]

        Arguments:
        images -- a list of np.arrays

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        """

        # create a temporary folder to store images and a temporary file
        # to store a list of paths to the images
        temp_dir_path = tempfile.mkdtemp()
        try: # this try...finally clause is used to clean up the temp directory in any case
            temp_imgfile_handle, temp_imgfile_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.txt')
            os.close(temp_imgfile_handle)  # close the raw descriptor; reopen below as a file object
            temp_imgfile = open(temp_imgfile_path, "w")
            for image in images:
                _, temp_image_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.jpeg')
                image = PIL.Image.fromarray(image)
                try:
                    image.save(temp_image_path, format='jpeg')
                except KeyError:
                    error_message = 'Unable to save file to "%s"' % temp_image_path
                    self.logger.error(error_message)
                    raise digits.frameworks.errors.InferenceError(error_message)
                temp_imgfile.write("%s\n" % temp_image_path)
            temp_imgfile.close()

            if config_value('torch_root') == '<PATHS>':
                torch_bin = 'th'
            else:
                torch_bin = os.path.join(config_value('torch_root'), 'bin', 'th')

            args = [torch_bin,
                    os.path.join(os.path.dirname(os.path.dirname(digits.__file__)), 'tools', 'torch', 'test.lua'),
                    '--testMany=yes',
                    '--allPredictions=yes',   # all predictions are grabbed and formatted as required by DIGITS
                    '--image=%s' % str(temp_imgfile_path),
                    # Original images are used here, so they are resized in the Torch code.
                    # This rework of resizing could be eliminated by finding an efficient
                    # way to send the Python image arrays to the Lua script.
                    '--resizeMode=%s' % str(self.dataset.resize_mode),
                    '--network=%s' % self.model_file.split(".")[0],
                    '--networkDirectory=%s' % self.job_dir,
                    '--load=%s' % self.job_dir,
                    '--snapshotPrefix=%s' % self.snapshot_prefix,
                    ]

            if isinstance(self.dataset, ImageClassificationDatasetJob):
                labels = self.get_labels()  # TODO: we probably don't need to return this, since the calling function can access it directly
                args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))
                args.append('--mean=%s' % self.dataset.path(constants.MEAN_FILE_IMAGE))
                if self.use_mean:
                    args.append('--subtractMean=yes')
                else:
                    args.append('--subtractMean=no')
            elif isinstance(self.dataset, dataset.GenericImageDatasetJob):
                if self.use_mean:
                    args.append('--mean=%s' % os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE))
                    args.append('--subtractMean=yes')
                else:
                    args.append('--subtractMean=no')

            if snapshot_epoch:
                args.append('--epoch=%d' % int(snapshot_epoch))
            if TORCH_USE_MEAN_PIXEL:
                args.append('--useMeanPixel=yes')
            if self.trained_on_cpu:
                args.append('--type=float')

            # input images have been resized to network input dimensions by caller
            args.append('--crop=no')

            # Convert them all to strings
            args = [str(x) for x in args]

            regex = re.compile(r'\x1b\[[0-9;]*m', re.UNICODE)  # TODO: need to include a regular expression for Mac color codes
            self.logger.info('%s classify many task started.' % self.name())

            unrecognized_output = []
            predictions = []
            p = subprocess.Popen(args,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    cwd=self.job_dir,
                    close_fds=True,
                    )

            try:
                while p.poll() is None:
                    for line in utils.nonblocking_readlines(p.stdout):
                        if self.aborted.is_set():
                            p.terminate()
                            raise digits.frameworks.errors.InferenceError('%s classify many task got aborted. error code - %d' % (self.get_framework_id(), p.returncode))

                        if line is not None:
                            # Remove whitespace and color codes. Color codes are appended
                            # to the beginning and end of each line by the torch binary,
                            # i.e. 'th'. See the link below for more information:
                            # https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ
                            line = regex.sub('', line).strip()
                        if line:
                            if not self.process_test_output(line, predictions, 'many'):
                                self.logger.warning('%s classify many task unrecognized input: %s' % (self.get_framework_id(), line.strip()))
                                unrecognized_output.append(line)
                        else:
                            time.sleep(0.05)
            except Exception as e:
                if p.poll() is None:
                    p.terminate()
                error_message = ''
                if type(e) == digits.frameworks.errors.InferenceError:
                    error_message = e.__str__()
                else:
                    error_message = '%s classify many task failed with error code %d \n %s' % (self.get_framework_id(), p.returncode, str(e))
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.frameworks.errors.InferenceError(error_message)

            if p.returncode != 0:
                error_message = '%s classify many task failed with error code %d' % (self.get_framework_id(), p.returncode)
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.frameworks.errors.InferenceError(error_message)
            else:
                self.logger.info('%s classify many task completed.' % self.get_framework_id())
        finally:
            shutil.rmtree(temp_dir_path)

        if isinstance(self.dataset, dataset.GenericImageDatasetJob):
            # task.infer_one() expects dictionary in return value
            return {'output': np.array(predictions)}
        else:
            return (labels, np.array(predictions))
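
A hedged usage sketch for `classify_many` on a classification dataset; the `task` object and the image paths are placeholders, and the caller is assumed to have already resized the images as noted above:

import numpy as np
import PIL.Image

paths = ['/tmp/cat.jpg', '/tmp/dog.jpg']      # placeholder image paths
images = [np.array(PIL.Image.open(p)) for p in paths]
labels, results = task.classify_many(images)  # task: a trained task instance
for path, confidences in zip(paths, results):
    best = int(np.argmax(confidences))
    print('%s -> %s (%.2f%%)' % (path, labels[best], 100.0 * confidences[best]))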
Example #6
    def infer_many_images(self, images, snapshot_epoch=None, gpu=None):
        """
        Returns (labels, results):
        labels -- an array of strings
        results -- a 2D np array:
            [
                [image0_label0_confidence, image0_label1_confidence, ...],
                [image1_label0_confidence, image1_label1_confidence, ...],
                ...
            ]

        Arguments:
        images -- a list of np.arrays

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        """

        # create a temporary folder to store images and a temporary file
        # to store a list of paths to the images
        temp_dir_path = tempfile.mkdtemp(suffix='.tfrecords')
        try:  # this try...finally clause is used to clean up the temp directory in any case
            with open(os.path.join(temp_dir_path, 'list.txt'), 'w') as imglist_file:
                for image in images:
                    if image.ndim < 3:
                        image = image[..., np.newaxis]
                    image = image.astype('float')
                    temp_image_handle, temp_image_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.tfrecords')
                    writer = tf.python_io.TFRecordWriter(temp_image_path)
                    record = tf.train.Example(features=tf.train.Features(feature={
                        'height': _int64_feature(image.shape[0]),
                        'width': _int64_feature(image.shape[1]),
                        'depth': _int64_feature(image.shape[2]),
                        'image_raw': _float_array_feature(image.flatten()),
                        'label': _int64_feature(0),
                        'encoding': _int64_feature(0)}))
                    writer.write(record.SerializeToString())
                    writer.close()
                    imglist_file.write("%s\n" % temp_image_path)
                    os.close(temp_image_handle)

            file_to_load = self.get_snapshot(snapshot_epoch)

            args = [config_value('tensorflow')['executable'],
                    os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'),
                    '--testMany=1',
                    '--allPredictions=1',  # all predictions are grabbed and formatted as required by DIGITS
                    '--inference_db=%s' % str(temp_dir_path),
                    '--network=%s' % self.model_file,
                    '--networkDirectory=%s' % self.job_dir,
                    '--weights=%s' % file_to_load,
                    ]

            if hasattr(self.dataset, 'labels_file'):
                args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file))

            if self.use_mean != 'none':
                mean_file = self.dataset.get_mean_file()
                assert mean_file is not None, 'Failed to retrieve mean file.'
                args.append('--mean=%s' % self.dataset.path(mean_file))

            if self.use_mean == 'pixel':
                args.append('--subtractMean=pixel')
            elif self.use_mean == 'image':
                args.append('--subtractMean=image')
            else:
                args.append('--subtractMean=none')
            if self.crop_size:
                args.append('--croplen=%d' % self.crop_size)

            # Convert them all to strings
            args = [str(x) for x in args]

            self.logger.info('%s classify many task started.' % self.name())

            env = os.environ.copy()
            if gpu is not None:
                # make only the selected GPU visible
                env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])

            unrecognized_output = []
            predictions = []
            p = subprocess.Popen(args,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 cwd=self.job_dir,
                                 close_fds=True,
                                 env=env)

            try:
                while p.poll() is None:
                    for line in utils.nonblocking_readlines(p.stdout):
                        if self.aborted.is_set():
                            p.terminate()
                            raise digits.inference.errors.InferenceError('%s classify many task got aborted. '
                                                                         'error code - %d' % (self.get_framework_id(),
                                                                                              p.returncode))

                        if line is not None and len(line) > 1:
                            if not self.process_test_output(line, predictions, 'many'):
                                self.logger.warning('%s classify many task unrecognized input: %s' % (
                                    self.get_framework_id(), line.strip()))
                                unrecognized_output.append(line)
                        else:
                            time.sleep(0.05)
            except Exception as e:
                if p.poll() is None:
                    p.terminate()
                error_message = ''
                if type(e) == digits.inference.errors.InferenceError:
                    error_message = e.__str__()
                else:
                    error_message = '%s classify many task failed with error code %d \n %s' % (
                        self.get_framework_id(), p.returncode, str(e))
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.inference.errors.InferenceError(error_message)

            if p.returncode != 0:
                error_message = '%s classify many task failed with error code %d' % (self.get_framework_id(),
                                                                                     p.returncode)
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.inference.errors.InferenceError(error_message)
            else:
                self.logger.info('%s classify many task completed.' % self.get_framework_id())
        finally:
            shutil.rmtree(temp_dir_path)

        # task.infer_one() expects dictionary in return value
        return {'output': np.array(predictions)}
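
The `_int64_feature` and `_float_array_feature` helpers used when building the TFRecords are referenced but never defined in these snippets. They presumably follow the standard `tf.train.Feature` wrappers; a sketch under that assumption:

import tensorflow as tf

def _int64_feature(value):
    """Wrap a single integer in a tf.train.Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _float_array_feature(value):
    """Wrap a flat iterable of floats in a tf.train.Feature."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))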
Example #7
    def infer_one_image(self, image, snapshot_epoch=None, layers=None, gpu=None):
        """
        Classify an image
        Returns (predictions, visualizations)
            predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence
            visualizations -- an array of (layer_name, activations, weights) for the specified layers
        Returns (None, None) if something goes wrong

        Arguments:
        image -- a np.array

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        layers -- which layer activation[s] and weight[s] to visualize
        """
        temp_image_handle, temp_image_path = tempfile.mkstemp(suffix='.tfrecords')
        os.close(temp_image_handle)
        if image.ndim < 3:
            image = image[..., np.newaxis]
        writer = tf.python_io.TFRecordWriter(temp_image_path)

        image = image.astype('float')
        record = tf.train.Example(features=tf.train.Features(feature={
            'height': _int64_feature(image.shape[0]),
            'width': _int64_feature(image.shape[1]),
            'depth': _int64_feature(image.shape[2]),
            'image_raw': _float_array_feature(image.flatten()),
            'label': _int64_feature(0),
            'encoding': _int64_feature(0)}))
        writer.write(record.SerializeToString())
        writer.close()

        file_to_load = self.get_snapshot(snapshot_epoch)

        args = [config_value('tensorflow')['executable'],
                os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'),
                '--inference_db=%s' % temp_image_path,
                '--network=%s' % self.model_file,
                '--networkDirectory=%s' % self.job_dir,
                '--weights=%s' % file_to_load,
                '--allPredictions=1',
                '--batch_size=1',
                ]
        if hasattr(self.dataset, 'labels_file'):
            args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file))

        if self.use_mean != 'none':
            mean_file = self.dataset.get_mean_file()
            assert mean_file is not None, 'Failed to retrieve mean file.'
            args.append('--mean=%s' % self.dataset.path(mean_file))

        if self.use_mean == 'pixel':
            args.append('--subtractMean=pixel')
        elif self.use_mean == 'image':
            args.append('--subtractMean=image')
        else:
            args.append('--subtractMean=none')

        if self.crop_size:
            args.append('--croplen=%d' % self.crop_size)

        if layers == 'all':
            args.append('--visualize_inf=1')
            args.append('--save=%s' % self.job_dir)

        # Convert them all to strings
        args = [str(x) for x in args]

        self.logger.info('%s classify one task started.' % self.get_framework_id())

        unrecognized_output = []
        predictions = []
        self.visualization_file = None

        env = os.environ.copy()

        if gpu is not None:
            # make only the selected GPU visible
            env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])

        p = subprocess.Popen(args,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             cwd=self.job_dir,
                             close_fds=True,
                             env=env)

        try:
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    if self.aborted.is_set():
                        p.terminate()
                        raise digits.inference.errors.InferenceError('%s classify one task got aborted. error code - %d' % (self.get_framework_id(), p.returncode))  # noqa

                    if line is not None and len(line) > 1:
                        if not self.process_test_output(line, predictions, 'one'):
                            self.logger.warning('%s classify one task unrecognized input: %s' % (
                                self.get_framework_id(), line.strip()))
                            unrecognized_output.append(line)
                    else:
                        time.sleep(0.05)
        except Exception as e:
            if p.poll() is None:
                p.terminate()
            error_message = ''
            if type(e) == digits.inference.errors.InferenceError:
                error_message = e.__str__()
            else:
                error_message = '%s classify one task failed with error code %d \n %s' % (
                    self.get_framework_id(), p.returncode, str(e))
            self.logger.error(error_message)
            if unrecognized_output:
                unrecognized_output = '\n'.join(unrecognized_output)
                error_message = error_message + unrecognized_output
            raise digits.inference.errors.InferenceError(error_message)

        finally:
            self.after_test_run(temp_image_path)

        if p.returncode != 0:
            error_message = '%s classify one task failed with error code %d' % (self.get_framework_id(), p.returncode)
            self.logger.error(error_message)
            if unrecognized_output:
                unrecognized_output = '\n'.join(unrecognized_output)
                error_message = error_message + unrecognized_output
            raise digits.inference.errors.InferenceError(error_message)
        else:
            self.logger.info('%s classify one task completed.' % self.get_framework_id())

        predictions = {'output': np.array(predictions)}

        visualizations = []

        if layers == 'all' and self.visualization_file:
            vis_db = h5py.File(self.visualization_file, 'r')
            # the HDF5 database is organized as follows:
            # <root>
            # |- layers
            #    |- 1
            #    |  [attrs] - op
            #    |  [attrs] - var
            #    |  |- activations
            #    |  |- weights
            #    |- 2
            for layer_id, layer in vis_db['layers'].items():
                op_name = layer.attrs['op']
                var_name = layer.attrs['var']
                layer_desc = "%s\n%s" % (op_name, var_name)
                idx = int(layer_id)
                # activations (tf: operation outputs)
                if 'activations' in layer:
                    data = np.array(layer['activations'][...])
                    if len(data.shape) > 1 and data.shape[0] == 1:
                        # skip batch dimension
                        data = data[0]
                    if len(data.shape) == 3:
                        data = data.transpose(2, 0, 1)
                    elif len(data.shape) == 4:
                        data = data.transpose(3, 2, 0, 1)
                    vis = utils.image.get_layer_vis_square(data)
                    mean, std, hist = self.get_layer_statistics(data)
                    visualizations.append(
                        {
                            'id': idx,
                            'name': layer_desc,
                            'vis_type': 'Activations',
                            'vis': vis,
                            'data_stats': {
                                'shape': data.shape,
                                'mean':  mean,
                                'stddev':  std,
                                'histogram': hist,
                            }
                        }
                    )
                # weights (tf: variables)
                if 'weights' in layer:
                    data = np.array(layer['weights'][...])
                    if len(data.shape) == 3:
                        data = data.transpose(2, 0, 1)
                    elif len(data.shape) == 4:
                        data = data.transpose(3, 2, 0, 1)
                    if 'MatMul' in layer_desc:
                        vis = None  # too many layers to display?
                    else:
                        vis = utils.image.get_layer_vis_square(data)
                    mean, std, hist = self.get_layer_statistics(data)
                    parameter_count = reduce(operator.mul, data.shape, 1)
                    visualizations.append(
                        {
                            'id':  idx,
                            'name': layer_desc,
                            'vis_type': 'Weights',
                            'vis': vis,
                            'param_count': parameter_count,
                            'data_stats': {
                                'shape': data.shape,
                                'mean': mean,
                                'stddev': std,
                                'histogram': hist,
                            }
                        }
                    )
            # sort by layer ID
            visualizations = sorted(visualizations, key=lambda x: x['id'])
        return (predictions, visualizations)
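
`get_layer_statistics` is called in both visualization paths but never shown. A plausible numpy sketch returning the mean, standard deviation, and histogram used in the `data_stats` dictionaries above (the bin count and the exact histogram format are assumptions):

import numpy as np

def get_layer_statistics(data):
    """Return (mean, stddev, histogram) for an activation or weight blob."""
    mean = np.mean(data)
    std = np.std(data)
    counts, edges = np.histogram(data, bins=20)
    centers = (edges[:-1] + edges[1:]) / 2.0  # one center per bin
    return mean, std, [counts.tolist(), centers.tolist()]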
Example #8
    def infer_one_image(self, image, snapshot_epoch=None, layers=None, gpu=None):
        """
        Classify an image
        Returns (predictions, visualizations)
            predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence
            visualizations -- an array of (layer_name, activations, weights) for the specified layers
        Returns (None, None) if something goes wrong

        Arguments:
        image -- a np.array

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        layers -- which layer activation[s] and weight[s] to visualize
        """
        temp_image_handle, temp_image_path = tempfile.mkstemp(suffix=".png")
        os.close(temp_image_handle)
        image = PIL.Image.fromarray(image)
        try:
            image.save(temp_image_path, format="png")
        except KeyError:
            error_message = 'Unable to save file to "%s"' % temp_image_path
            self.logger.error(error_message)
            raise digits.inference.errors.InferenceError(error_message)

        if config_value("torch_root") == "<PATHS>":
            torch_bin = "th"
        else:
            torch_bin = os.path.join(config_value("torch_root"), "bin", "th")

        file_to_load = self.get_snapshot(snapshot_epoch)

        args = [
            torch_bin,
            os.path.join(os.path.dirname(os.path.dirname(digits.__file__)), "tools", "torch", "wrapper.lua"),
            "test.lua",
            "--image=%s" % temp_image_path,
            "--network=%s" % self.model_file.split(".")[0],
            "--networkDirectory=%s" % self.job_dir,
            "--snapshot=%s" % file_to_load,
            "--allPredictions=yes",
        ]
        if hasattr(self.dataset, "labels_file"):
            args.append("--labels=%s" % self.dataset.path(self.dataset.labels_file))

        if self.use_mean != "none":
            filename = self.create_mean_file()
            args.append("--mean=%s" % os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE))

        if self.use_mean == "pixel":
            args.append("--subtractMean=pixel")
        elif self.use_mean == "image":
            args.append("--subtractMean=image")
        else:
            args.append("--subtractMean=none")

        if self.crop_size:
            args.append("--crop=yes")
            args.append("--croplen=%d" % self.crop_size)

        if layers == "all":
            args.append("--visualization=yes")
            args.append("--save=%s" % self.job_dir)

        # Convert them all to strings
        args = [str(x) for x in args]

        regex = re.compile("\x1b\[[0-9;]*m", re.UNICODE)  # TODO: need to include regular expression for MAC color codes
        self.logger.info("%s classify one task started." % self.get_framework_id())

        unrecognized_output = []
        predictions = []
        self.visualization_file = None

        env = os.environ.copy()

        if gpu is not None:
            args.append("--type=cuda")
            # make only the selected GPU visible
            env["CUDA_VISIBLE_DEVICES"] = "%d" % gpu
        else:
            args.append("--type=float")

        p = subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.job_dir, close_fds=True, env=env
        )

        try:
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    if self.aborted.is_set():
                        p.terminate()
                        raise digits.inference.errors.InferenceError(
                            "%s classify one task got aborted. error code - %d"
                            % (self.get_framework_id(), p.returncode)
                        )

                    if line is not None:
                        # Remove color codes and whitespace
                        line = regex.sub("", line).strip()
                    if line:
                        if not self.process_test_output(line, predictions, "one"):
                            self.logger.warning(
                                "%s classify one task unrecognized input: %s" % (self.get_framework_id(), line.strip())
                            )
                            unrecognized_output.append(line)
                    else:
                        time.sleep(0.05)

        except Exception as e:
            if p.poll() is None:
                p.terminate()
            error_message = ""
            if type(e) == digits.inference.errors.InferenceError:
                error_message = e.__str__()
            else:
                error_message = "%s classify one task failed with error code %d \n %s" % (
                    self.get_framework_id(),
                    p.returncode,
                    str(e),
                )
            self.logger.error(error_message)
            if unrecognized_output:
                unrecognized_output = "\n".join(unrecognized_output)
                error_message = error_message + unrecognized_output
            raise digits.inference.errors.InferenceError(error_message)

        finally:
            self.after_test_run(temp_image_path)

        if p.returncode != 0:
            error_message = "%s classify one task failed with error code %d" % (self.get_framework_id(), p.returncode)
            self.logger.error(error_message)
            if unrecognized_output:
                unrecognized_output = "\n".join(unrecognized_output)
                error_message = error_message + unrecognized_output
            raise digits.inference.errors.InferenceError(error_message)
        else:
            self.logger.info("%s classify one task completed." % self.get_framework_id())

        predictions = {"output": np.array(predictions)}

        visualizations = []

        if layers == "all" and self.visualization_file:
            vis_db = h5py.File(self.visualization_file, "r")
            # the HDF5 database is organized as follows:
            # <root>
            # |- layers
            #    |- 1
            #    |  |- name
            #    |  |- activations
            #    |  |- weights
            #    |- 2
            for layer_id, layer in vis_db["layers"].items():
                layer_desc = layer["name"][...].tostring()
                if "Sequential" in layer_desc or "Parallel" in layer_desc:
                    # ignore containers
                    continue
                idx = int(layer_id)
                # activations
                if "activations" in layer:
                    data = np.array(layer["activations"][...])
                    # skip batch dimension
                    if len(data.shape) > 1 and data.shape[0] == 1:
                        data = data[0]
                    vis = utils.image.get_layer_vis_square(data)
                    mean, std, hist = self.get_layer_statistics(data)
                    visualizations.append(
                        {
                            "id": idx,
                            "name": layer_desc,
                            "vis_type": "Activations",
                            "vis": vis,
                            "data_stats": {"shape": data.shape, "mean": mean, "stddev": std, "histogram": hist},
                        }
                    )
                # weights
                if "weights" in layer:
                    data = np.array(layer["weights"][...])
                    if "Linear" not in layer_desc:
                        vis = utils.image.get_layer_vis_square(data)
                    else:
                        # Linear (inner product) layers have too many weights
                        # to display
                        vis = None
                    mean, std, hist = self.get_layer_statistics(data)
                    parameter_count = reduce(operator.mul, data.shape, 1)
                    if "bias" in layer:
                        bias = np.array(layer["bias"][...])
                        parameter_count += reduce(operator.mul, bias.shape, 1)
                    visualizations.append(
                        {
                            "id": idx,
                            "name": layer_desc,
                            "vis_type": "Weights",
                            "vis": vis,
                            "param_count": parameter_count,
                            "data_stats": {"shape": data.shape, "mean": mean, "stddev": std, "histogram": hist},
                        }
                    )
            # sort by layer ID
            visualizations = sorted(visualizations, key=lambda x: x["id"])
        return (predictions, visualizations)
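
Both the Torch and TensorFlow paths pin the child process to a single GPU through `CUDA_VISIBLE_DEVICES`. A minimal standalone sketch of that pattern; `my_worker.py` is a placeholder script:

import os
import subprocess
import sys

def run_on_gpu(args, gpu_id):
    """Launch a child process that can only see the selected GPU."""
    env = os.environ.copy()
    env['CUDA_VISIBLE_DEVICES'] = str(gpu_id)  # the child sees this GPU as device 0
    return subprocess.Popen(args, env=env,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)

# p = run_on_gpu([sys.executable, 'my_worker.py'], gpu_id=1)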
Example #9
    def get_network_visualization(self, **kwargs):
        """
        return visualization of network
        """
        desc = kwargs['desc']
        dataset = kwargs['dataset']
        solver_type = kwargs['solver_type'].lower() if kwargs['solver_type'] else None
        use_mean = kwargs['use_mean']
        crop_size = kwargs['crop_size']
        num_gpus = kwargs['num_gpus']
        if dataset is None:
            raise NetworkVisualizationError(
                'Make sure a dataset is selected to visualize this network.')

        # save network description to temporary file
        temp_network_handle, temp_network_path = tempfile.mkstemp(suffix='.py')
        os.write(temp_network_handle, desc)
        os.close(temp_network_handle)

        # Generate a temporary file to put the graph definition in
        temp_graphdef_handle, temp_graphdef_path = tempfile.mkstemp(suffix='.pbtxt')
        os.close(temp_graphdef_handle)
        # Another for the HTML
        temp_html_handle, temp_html_path = tempfile.mkstemp(suffix='.html')
        os.close(temp_html_handle)

        try:  # do this in a try..finally clause to make sure we delete the temp file
            # build command line
            args = [
                config_value('tensorflow')['executable'],
                os.path.join(os.path.dirname(digits.__file__), 'tools',
                             'tensorflow', 'main.py'),
                '--network=%s' % os.path.basename(temp_network_path),
                '--networkDirectory=%s' % os.path.dirname(temp_network_path),
                '--visualizeModelPath=%s' % temp_graphdef_path,
                '--optimization=%s' % solver_type,
            ]

            if crop_size:
                args.append('--croplen=%s' % crop_size)

            if use_mean and use_mean != 'none':
                mean_file = dataset.get_mean_file()
                assert mean_file is not None, 'Failed to retrieve mean file.'
                args.append('--subtractMean=%s' % use_mean)
                args.append('--mean=%s' % dataset.path(mean_file))

            if hasattr(dataset, 'labels_file'):
                args.append('--labels_list=%s' %
                            dataset.path(dataset.labels_file))

            train_feature_db_path = dataset.get_feature_db_path(
                constants.TRAIN_DB)
            train_label_db_path = dataset.get_label_db_path(constants.TRAIN_DB)
            val_feature_db_path = dataset.get_feature_db_path(constants.VAL_DB)
            val_label_db_path = dataset.get_label_db_path(constants.VAL_DB)

            args.append('--train_db=%s' % train_feature_db_path)
            if train_label_db_path:
                args.append('--train_labels=%s' % train_label_db_path)
            if val_feature_db_path:
                args.append('--validation_db=%s' % val_feature_db_path)
            if val_label_db_path:
                args.append('--validation_labels=%s' % val_label_db_path)

            env = os.environ.copy()
            # make only a selected number of GPUs visible. The ID is not important for just the vis
            env['CUDA_VISIBLE_DEVICES'] = ",".join(
                [str(i) for i in range(0, int(num_gpus))])

            # execute command
            p = subprocess.Popen(args,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 close_fds=True,
                                 env=env)

            stdout_log = ''
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    timestamp, level, message = TensorflowTrainTask.preprocess_output_tensorflow(
                        line.strip())
                    if line is not None:
                        stdout_log += line
            if p.returncode:
                raise NetworkVisualizationError(stdout_log)
            else:  # Success!
                with open(temp_graphdef_path) as f:
                    return repr(f.read())
        finally:
            os.remove(temp_network_path)
            os.remove(temp_graphdef_path)
            os.remove(temp_html_path)
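
The `.pbtxt` graph definition written by this step can be parsed back into a `GraphDef` if needed. A sketch assuming the TensorFlow 1.x API used elsewhere in these examples:

import tensorflow as tf
from google.protobuf import text_format

def load_graphdef_pbtxt(path):
    """Parse a text-format GraphDef produced by the visualization step."""
    graph_def = tf.GraphDef()
    with open(path) as f:
        text_format.Merge(f.read(), graph_def)
    return graph_def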
Example #10
    def classify_many(self, images, snapshot_epoch=None):
        """
        Returns (labels, results):
        labels -- an array of strings
        results -- a 2D np array:
            [
                [image0_label0_confidence, image0_label1_confidence, ...],
                [image1_label0_confidence, image1_label1_confidence, ...],
                ...
            ]

        Arguments:
        images -- a list of np.arrays

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        """

        # create a temporary folder to store images and a temporary file
        # to store a list of paths to the images
        temp_dir_path = tempfile.mkdtemp()
        try: # this try...finally clause is used to clean up the temp directory in any case
            temp_imglist_handle, temp_imglist_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.txt')
            for image in images:
                temp_image_handle, temp_image_path = tempfile.mkstemp(
                        dir=temp_dir_path, suffix='.jpeg')
                image = PIL.Image.fromarray(image)
                try:
                    image.save(temp_image_path, format='jpeg')
                except KeyError:
                    error_message = 'Unable to save file to "%s"' % temp_image_path
                    self.logger.error(error_message)
                    raise digits.frameworks.errors.InferenceError(error_message)
                os.write(temp_imglist_handle, "%s\n" % temp_image_path)
                os.close(temp_image_handle)
            os.close(temp_imglist_handle)

            if config_value('torch_root') == '<PATHS>':
                torch_bin = 'th'
            else:
                torch_bin = os.path.join(config_value('torch_root'), 'bin', 'th')

            args = [torch_bin,
                    os.path.join(os.path.dirname(os.path.dirname(digits.__file__)), 'tools', 'torch', 'test.lua'),
                    '--testMany=yes',
                    '--allPredictions=yes',   # all predictions are grabbed and formatted as required by DIGITS
                    '--image=%s' % str(temp_imglist_path),
                    # Original images are used here, so they are resized in the Torch code.
                    # This rework of resizing could be eliminated by finding an efficient
                    # way to send the Python image arrays to the Lua script.
                    '--resizeMode=%s' % str(self.dataset.resize_mode),
                    '--network=%s' % self.model_file.split(".")[0],
                    '--networkDirectory=%s' % self.job_dir,
                    '--load=%s' % self.job_dir,
                    '--snapshotPrefix=%s' % self.snapshot_prefix,
                    ]

            if isinstance(self.dataset, ImageClassificationDatasetJob):
                labels = self.get_labels()  # TODO: we probably don't need to return this, since the calling function can access it directly
                args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))
                args.append('--mean=%s' % self.dataset.path(constants.MEAN_FILE_IMAGE))
            elif isinstance(self.dataset, dataset.GenericImageDatasetJob):
                if self.use_mean != 'none':
                    args.append('--mean=%s' % os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE))

            if snapshot_epoch:
                args.append('--epoch=%d' % int(snapshot_epoch))
            if self.trained_on_cpu:
                args.append('--type=float')

            if self.use_mean == 'pixel':
                args.append('--subtractMean=pixel')
            elif self.use_mean == 'image':
                args.append('--subtractMean=image')
            else:
                args.append('--subtractMean=none')

            if self.crop_size:
                args.append('--crop=yes')
                args.append('--croplen=%d' % self.crop_size)

            # Convert them all to strings
            args = [str(x) for x in args]

            regex = re.compile(r'\x1b\[[0-9;]*m', re.UNICODE)  # TODO: need to include a regular expression for Mac color codes
            self.logger.info('%s classify many task started.' % self.name())

            unrecognized_output = []
            predictions = []
            p = subprocess.Popen(args,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    cwd=self.job_dir,
                    close_fds=True,
                    )

            try:
                while p.poll() is None:
                    for line in utils.nonblocking_readlines(p.stdout):
                        if self.aborted.is_set():
                            p.terminate()
                            raise digits.frameworks.errors.InferenceError('%s classify many task got aborted. error code - %d' % (self.get_framework_id(), p.returncode))

                        if line is not None:
                            # Remove whitespace and color codes. Color codes are appended
                            # to the beginning and end of each line by the torch binary,
                            # i.e. 'th'. See the link below for more information:
                            # https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ
                            line = regex.sub('', line).strip()
                        if line:
                            if not self.process_test_output(line, predictions, 'many'):
                                self.logger.warning('%s classify many task unrecognized input: %s' % (self.get_framework_id(), line.strip()))
                                unrecognized_output.append(line)
                        else:
                            time.sleep(0.05)
            except Exception as e:
                if p.poll() is None:
                    p.terminate()
                error_message = ''
                if type(e) == digits.frameworks.errors.InferenceError:
                    error_message = e.__str__()
                else:
                    error_message = '%s classify many task failed with error code %d \n %s' % (self.get_framework_id(), p.returncode, str(e))
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.frameworks.errors.InferenceError(error_message)

            if p.returncode != 0:
                error_message = '%s classify many task failed with error code %d' % (self.get_framework_id(), p.returncode)
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.frameworks.errors.InferenceError(error_message)
            else:
                self.logger.info('%s classify many task completed.' % self.get_framework_id())
        finally:
            shutil.rmtree(temp_dir_path)

        if isinstance(self.dataset, dataset.GenericImageDatasetJob):
            # task.infer_one() expects dictionary in return value
            return {'output': np.array(predictions)}
        else:
            return (labels, np.array(predictions))
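
Note: every example here reads the child process through utils.nonblocking_readlines, which is never defined in these snippets. A minimal sketch of such a helper, assuming a select-based poll (an illustration, not the actual DIGITS implementation):

import os
import select


def nonblocking_readlines(f):
    # Yield complete lines from file object f without blocking;
    # yield None when no data is ready so the caller can sleep briefly.
    fd = f.fileno()
    buf = ''
    while True:
        ready, _, _ = select.select([fd], [], [], 0.05)
        if not ready:
            yield None
            continue
        chunk = os.read(fd, 1024)
        if not chunk:  # EOF: flush any partial line and stop
            if buf:
                yield buf
            return
        buf += chunk
        while '\n' in buf:
            line, buf = buf.split('\n', 1)
            yield line + '\n'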
Example #11
    def infer_many_images(self, images, snapshot_epoch=None, gpu=None):
        """
        Returns (labels, results):
        labels -- an array of strings
        results -- a 2D np array:
            [
                [image0_label0_confidence, image0_label1_confidence, ...],
                [image1_label0_confidence, image1_label1_confidence, ...],
                ...
            ]

        Arguments:
        images -- a list of np.arrays

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        """

        # create a temporary folder to store images and a temporary file
        # to store a list of paths to the images
        temp_dir_path = tempfile.mkdtemp(suffix='.tfrecords')
        try:  # this try...finally clause is used to clean up the temp directory in any case
            with open(os.path.join(temp_dir_path, 'list.txt'), 'w') as imglist_file:
                for image in images:
                    if image.ndim < 3:
                        image = image[..., np.newaxis]
                    image = image.astype('float')
                    temp_image_handle, temp_image_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.tfrecords')
                    writer = tf.python_io.TFRecordWriter(temp_image_path)
                    record = tf.train.Example(features=tf.train.Features(feature={
                        'height': _int64_feature(image.shape[0]),
                        'width': _int64_feature(image.shape[1]),
                        'depth': _int64_feature(image.shape[2]),
                        'image_raw': _float_array_feature(image.flatten()),
                        'label': _int64_feature(0),
                        'encoding': _int64_feature(0)}))
                    writer.write(record.SerializeToString())
                    writer.close()
                    imglist_file.write("%s\n" % temp_image_path)
                    os.close(temp_image_handle)

            file_to_load = self.get_snapshot(snapshot_epoch)

            args = [sys.executable,
                    os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'),
                    '--testMany=1',
                    '--allPredictions=1',  # all predictions are grabbed and formatted as required by DIGITS
                    '--inference_db=%s' % str(temp_dir_path),
                    '--network=%s' % self.model_file,
                    '--networkDirectory=%s' % self.job_dir,
                    '--weights=%s' % file_to_load,
                    ]

            if hasattr(self.dataset, 'labels_file'):
                args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file))

            if self.use_mean != 'none':
                mean_file = self.dataset.get_mean_file()
                assert mean_file is not None, 'Failed to retrieve mean file.'
                args.append('--mean=%s' % self.dataset.path(mean_file))

            if self.use_mean == 'pixel':
                args.append('--subtractMean=pixel')
            elif self.use_mean == 'image':
                args.append('--subtractMean=image')
            else:
                args.append('--subtractMean=none')
            if self.crop_size:
                args.append('--croplen=%d' % self.crop_size)

            # Convert them all to strings
            args = [str(x) for x in args]

            self.logger.info('%s classify many task started.' % self.name())

            env = os.environ.copy()
            if gpu is not None:
                # make only the selected GPU visible
                env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])

            unrecognized_output = []
            predictions = []
            p = subprocess.Popen(args,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 cwd=self.job_dir,
                                 close_fds=True,
                                 env=env)

            try:
                while p.poll() is None:
                    for line in utils.nonblocking_readlines(p.stdout):
                        if self.aborted.is_set():
                            p.terminate()
                            raise digits.inference.errors.InferenceError('%s classify many task got aborted. '
                                                                         'error code - %d' % (self.get_framework_id(),
                                                                                              p.returncode))

                        if line is not None and len(line) > 1:
                            if not self.process_test_output(line, predictions, 'many'):
                                self.logger.warning('%s classify many task unrecognized input: %s' % (
                                    self.get_framework_id(), line.strip()))
                                unrecognized_output.append(line)
                        else:
                            time.sleep(0.05)
            except Exception as e:
                if p.poll() is None:
                    p.terminate()
                error_message = ''
                if type(e) == digits.inference.errors.InferenceError:
                    error_message = e.__str__()
                else:
                    error_message = '%s classify many task failed with error code %d \n %s' % (
                        self.get_framework_id(), p.returncode, str(e))
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.inference.errors.InferenceError(error_message)

            if p.returncode != 0:
                error_message = '%s classify many task failed with error code %d' % (self.get_framework_id(),
                                                                                     p.returncode)
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.inference.errors.InferenceError(error_message)
            else:
                self.logger.info('%s classify many task completed.' % self.get_framework_id())
        finally:
            shutil.rmtree(temp_dir_path)

        # task.infer_one() expects dictionary in return value
        return {'output': np.array(predictions)}
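
The _int64_feature and _float_array_feature helpers used to build the tf.train.Example records above are not defined in these snippets. The conventional definitions under the TF 1.x API (an assumption based on standard TFRecord usage) would be:

import tensorflow as tf


def _int64_feature(value):
    # wrap a single integer in an Int64List feature
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _float_array_feature(value):
    # wrap a flat iterable of floats in a FloatList feature
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))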
Example #12
    def infer_one_image(self, image, snapshot_epoch=None, layers=None, gpu=None):
        """
        Classify an image
        Returns (predictions, visualizations)
            predictions -- an array of [ (label, confidence), ...] for each label, sorted by confidence
            visualizations -- an array of (layer_name, activations, weights) for the specified layers
        Returns (None, None) if something goes wrong

        Arguments:
        image -- a np.array

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        layers -- which layer activation[s] and weight[s] to visualize
        """
        temp_image_handle, temp_image_path = tempfile.mkstemp(suffix='.tfrecords')
        os.close(temp_image_handle)
        if image.ndim < 3:
            image = image[..., np.newaxis]
        writer = tf.python_io.TFRecordWriter(temp_image_path)

        image = image.astype('float')
        record = tf.train.Example(features=tf.train.Features(feature={
            'height': _int64_feature(image.shape[0]),
            'width': _int64_feature(image.shape[1]),
            'depth': _int64_feature(image.shape[2]),
            'image_raw': _float_array_feature(image.flatten()),
            'label': _int64_feature(0),
            'encoding': _int64_feature(0)}))
        writer.write(record.SerializeToString())
        writer.close()

        file_to_load = self.get_snapshot(snapshot_epoch)

        args = [sys.executable,
                os.path.join(os.path.dirname(os.path.abspath(digits.__file__)), 'tools', 'tensorflow', 'main.py'),
                '--inference_db=%s' % temp_image_path,
                '--network=%s' % self.model_file,
                '--networkDirectory=%s' % self.job_dir,
                '--weights=%s' % file_to_load,
                '--allPredictions=1',
                '--batch_size=1',
                ]
        if hasattr(self.dataset, 'labels_file'):
            args.append('--labels_list=%s' % self.dataset.path(self.dataset.labels_file))

        if self.use_mean != 'none':
            mean_file = self.dataset.get_mean_file()
            assert mean_file is not None, 'Failed to retrieve mean file.'
            args.append('--mean=%s' % self.dataset.path(mean_file))

        if self.use_mean == 'pixel':
            args.append('--subtractMean=pixel')
        elif self.use_mean == 'image':
            args.append('--subtractMean=image')
        else:
            args.append('--subtractMean=none')

        if self.crop_size:
            args.append('--croplen=%d' % self.crop_size)

        if layers == 'all':
            args.append('--visualize_inf=1')
            args.append('--save=%s' % self.job_dir)

        # Convert them all to strings
        args = [str(x) for x in args]

        self.logger.info('%s classify one task started.' % self.get_framework_id())

        unrecognized_output = []
        predictions = []
        self.visualization_file = None

        env = os.environ.copy()

        if gpu is not None:
            # make only the selected GPU visible
            env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])

        p = subprocess.Popen(args,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             cwd=self.job_dir,
                             close_fds=True,
                             env=env)

        try:
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    if self.aborted.is_set():
                        p.terminate()
                        raise digits.inference.errors.InferenceError('%s classify one task got aborted. error code - %d' % (self.get_framework_id(), p.returncode))  # noqa

                    if line is not None and len(line) > 1:
                        if not self.process_test_output(line, predictions, 'one'):
                            self.logger.warning('%s classify one task unrecognized input: %s' % (
                                self.get_framework_id(), line.strip()))
                            unrecognized_output.append(line)
                    else:
                        time.sleep(0.05)
        except Exception as e:
            if p.poll() is None:
                p.terminate()
            error_message = ''
            if type(e) == digits.inference.errors.InferenceError:
                error_message = e.__str__()
            else:
                error_message = '%s classify one task failed with error code %d \n %s' % (
                    self.get_framework_id(), p.returncode, str(e))
            self.logger.error(error_message)
            if unrecognized_output:
                unrecognized_output = '\n'.join(unrecognized_output)
                error_message = error_message + unrecognized_output
            raise digits.inference.errors.InferenceError(error_message)

        finally:
            self.after_test_run(temp_image_path)

        if p.returncode != 0:
            error_message = '%s classify one task failed with error code %d' % (self.get_framework_id(), p.returncode)
            self.logger.error(error_message)
            if unrecognized_output:
                unrecognized_output = '\n'.join(unrecognized_output)
                error_message = error_message + unrecognized_output
            raise digits.inference.errors.InferenceError(error_message)
        else:
            self.logger.info('%s classify one task completed.' % self.get_framework_id())

        predictions = {'output': np.array(predictions)}

        visualizations = []

        if layers == 'all' and self.visualization_file:
            vis_db = h5py.File(self.visualization_file, 'r')
            # the HDF5 database is organized as follows:
            # <root>
            # |- layers
            #    |- 1
            #    |  [attrs] - op
            #    |  [attrs] - var
            #    |  |- activations
            #    |  |- weights
            #    |- 2
            for layer_id, layer in vis_db['layers'].items():
                op_name = layer.attrs['op']
                var_name = layer.attrs['var']
                layer_desc = "%s\n%s" % (op_name, var_name)
                idx = int(layer_id)
                # activations (tf: operation outputs)
                if 'activations' in layer:
                    data = np.array(layer['activations'][...])
                    if len(data.shape) > 1 and data.shape[0] == 1:
                        # skip batch dimension
                        data = data[0]
                    if len(data.shape) == 3:
                        data = data.transpose(2, 0, 1)
                    elif len(data.shape) == 4:
                        data = data.transpose(3, 2, 0, 1)
                    vis = utils.image.get_layer_vis_square(data)
                    mean, std, hist = self.get_layer_statistics(data)
                    visualizations.append(
                        {
                            'id': idx,
                            'name': layer_desc,
                            'vis_type': 'Activations',
                            'vis': vis,
                            'data_stats': {
                                'shape': data.shape,
                                'mean':  mean,
                                'stddev':  std,
                                'histogram': hist,
                            }
                        }
                    )
                # weights (tf: variables)
                if 'weights' in layer:
                    data = np.array(layer['weights'][...])
                    if len(data.shape) == 3:
                        data = data.transpose(2, 0, 1)
                    elif len(data.shape) == 4:
                        data = data.transpose(3, 2, 0, 1)
                    if 'MatMul' in layer_desc:
                        vis = None  # too many layers to display?
                    else:
                        vis = utils.image.get_layer_vis_square(data)
                    mean, std, hist = self.get_layer_statistics(data)
                    parameter_count = reduce(operator.mul, data.shape, 1)
                    visualizations.append(
                        {
                            'id':  idx,
                            'name': layer_desc,
                            'vis_type': 'Weights',
                            'vis': vis,
                            'param_count': parameter_count,
                            'data_stats': {
                                'shape': data.shape,
                                'mean': mean,
                                'stddev': std,
                                'histogram': hist,
                            }
                        }
                    )
            # sort by layer ID
            visualizations = sorted(visualizations, key=lambda x: x['id'])
        return (predictions, visualizations)
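
Given the HDF5 layout documented in the comment above, the visualization file can also be inspected standalone; a minimal sketch ('vis.h5' is a hypothetical path):

import h5py

with h5py.File('vis.h5', 'r') as vis_db:
    # walk the layers group described in the comment above
    for layer_id, layer in vis_db['layers'].items():
        print layer_id, layer.attrs['op'], layer.attrs['var']
        if 'activations' in layer:
            print '  activations shape:', layer['activations'].shape
        if 'weights' in layer:
            print '  weights shape:', layer['weights'].shape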
Example #13
    def get_network_visualization(self, **kwargs):
        """
        return visualization of network
        """
        desc = kwargs['desc']
        dataset = kwargs['dataset']
        solver_type = kwargs['solver_type'].lower() if kwargs['solver_type'] else None
        use_mean = kwargs['use_mean']
        crop_size = kwargs['crop_size']
        num_gpus = kwargs['num_gpus']
        if dataset is None:
            raise NetworkVisualizationError('Make sure a dataset is selected to visualize this network.')

        # save network description to temporary file
        temp_network_handle, temp_network_path = tempfile.mkstemp(suffix='.py')
        os.write(temp_network_handle, desc)
        os.close(temp_network_handle)

        # Generate a temporary file to put the graph definition in
        _, temp_graphdef_path = tempfile.mkstemp(suffix='.pbtxt')
        # Another for the HTML
        _, temp_html_path = tempfile.mkstemp(suffix='.html')

        try:  # do this in a try..finally clause to make sure we delete the temp file
            # build command line
            args = [sys.executable,
                    os.path.join(os.path.dirname(digits.__file__), 'tools', 'tensorflow', 'main.py'),
                    '--network=%s' % os.path.basename(temp_network_path),
                    '--networkDirectory=%s' % os.path.dirname(temp_network_path),
                    '--visualizeModelPath=%s' % temp_graphdef_path,
                    '--optimization=%s' % solver_type,
                    ]

            if crop_size:
                args.append('--croplen=%s' % crop_size)

            if use_mean and use_mean != 'none':
                mean_file = dataset.get_mean_file()
                assert mean_file is not None, 'Failed to retrieve mean file.'
                args.append('--subtractMean=%s' % use_mean)
                args.append('--mean=%s' % dataset.path(mean_file))

            if hasattr(dataset, 'labels_file'):
                args.append('--labels_list=%s' % dataset.path(dataset.labels_file))

            train_feature_db_path = dataset.get_feature_db_path(constants.TRAIN_DB)
            train_label_db_path = dataset.get_label_db_path(constants.TRAIN_DB)
            val_feature_db_path = dataset.get_feature_db_path(constants.VAL_DB)
            val_label_db_path = dataset.get_label_db_path(constants.VAL_DB)

            args.append('--train_db=%s' % train_feature_db_path)
            if train_label_db_path:
                args.append('--train_labels=%s' % train_label_db_path)
            if val_feature_db_path:
                args.append('--validation_db=%s' % val_feature_db_path)
            if val_label_db_path:
                args.append('--validation_labels=%s' % val_label_db_path)

            env = os.environ.copy()
            # make only the requested number of GPUs visible; the specific IDs do not matter for visualization
            env['CUDA_VISIBLE_DEVICES'] = ",".join([str(i) for i in range(0, int(num_gpus))])

            # execute command
            p = subprocess.Popen(args,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 close_fds=True,
                                 env=env)

            stdout_log = ''
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    if line is not None:
                        timestamp, level, message = TensorflowTrainTask.preprocess_output_tensorflow(line.strip())
                        stdout_log += line
                    else:
                        time.sleep(0.05)
            if p.returncode:
                raise NetworkVisualizationError(stdout_log)
            else:  # Success!
                with open(temp_graphdef_path) as f:
                    return repr(f.read())
        finally:
            os.remove(temp_network_path)
            os.remove(temp_graphdef_path)
            os.remove(temp_html_path)
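
The CUDA_VISIBLE_DEVICES mask built above only controls which GPUs the child process can see; for visualization only the count matters, not the IDs. The same masking in isolation (num_gpus is a made-up value):

import os

num_gpus = 2  # hypothetical: make two GPUs visible to the subprocess
env = os.environ.copy()
env['CUDA_VISIBLE_DEVICES'] = ",".join(str(i) for i in range(num_gpus))
assert env['CUDA_VISIBLE_DEVICES'] == '0,1'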
Example #14
    def infer_many_images(self, images, snapshot_epoch=None, gpu=None):
        """
        Returns (labels, results):
        labels -- an array of strings
        results -- a 2D np array:
            [
                [image0_label0_confidence, image0_label1_confidence, ...],
                [image1_label0_confidence, image1_label1_confidence, ...],
                ...
            ]

        Arguments:
        images -- a list of np.arrays

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        """

        # create a temporary folder to store images and a temporary file
        # to store a list of paths to the images
        temp_dir_path = tempfile.mkdtemp()
        try: # this try...finally clause is used to clean up the temp directory in any case
            temp_imglist_handle, temp_imglist_path = tempfile.mkstemp(dir=temp_dir_path, suffix='.txt')
            for image in images:
                temp_image_handle, temp_image_path = tempfile.mkstemp(
                        dir=temp_dir_path, suffix='.png')
                image = PIL.Image.fromarray(image)
                try:
                    image.save(temp_image_path, format='png')
                except KeyError:
                    error_message = 'Unable to save file to "%s"' % temp_image_path
                    self.logger.error(error_message)
                    raise digits.inference.errors.InferenceError(error_message)
                os.write(temp_imglist_handle, "%s\n" % temp_image_path)
                os.close(temp_image_handle)
            os.close(temp_imglist_handle)

            if config_value('torch_root') == '<PATHS>':
                torch_bin = 'th'
            else:
                torch_bin = os.path.join(config_value('torch_root'), 'bin', 'th')

            file_to_load = self.get_snapshot(snapshot_epoch)

            args = [torch_bin,
                    os.path.join(os.path.dirname(os.path.dirname(digits.__file__)),'tools','torch','wrapper.lua'),
                    'test.lua',
                    '--testMany=yes',
                    '--allPredictions=yes',   #all predictions are grabbed and formatted as required by DIGITS
                    '--image=%s' % str(temp_imglist_path),
                    '--network=%s' % self.model_file.split(".")[0],
                    '--networkDirectory=%s' % self.job_dir,
                    '--snapshot=%s' % file_to_load,
                    ]

            if hasattr(self.dataset, 'labels_file'):
                args.append('--labels=%s' % self.dataset.path(self.dataset.labels_file))

            if self.use_mean != 'none':
                filename = self.create_mean_file()
                args.append('--mean=%s' % os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE))

            if self.use_mean == 'pixel':
                args.append('--subtractMean=pixel')
            elif self.use_mean == 'image':
                args.append('--subtractMean=image')
            else:
                args.append('--subtractMean=none')
            if self.crop_size:
                args.append('--crop=yes')
                args.append('--croplen=%d' % self.crop_size)

            # Convert them all to strings
            args = [str(x) for x in args]

            regex = re.compile(r'\x1b\[[0-9;]*m', re.UNICODE)  # TODO: need to include a regular expression for Mac color codes
            self.logger.info('%s classify many task started.' % self.name())

            env = os.environ.copy()
            if gpu is not None:
                args.append('--type=cuda')
                # make only the selected GPU visible
                env['CUDA_VISIBLE_DEVICES'] = "%d" % gpu
            else:
                args.append('--type=float')

            unrecognized_output = []
            predictions = []
            p = subprocess.Popen(args,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.STDOUT,
                    cwd=self.job_dir,
                    close_fds=True,
                    env=env
                    )

            try:
                while p.poll() is None:
                    for line in utils.nonblocking_readlines(p.stdout):
                        if self.aborted.is_set():
                            p.terminate()
                            raise digits.inference.errors.InferenceError('%s classify many task got aborted. error code - %d' % (self.get_framework_id(), p.returncode))

                        if line is not None:
                            # Remove whitespace and color codes. Color codes are appended to the beginning and end of each line by the torch binary, i.e. 'th'. Check the link below for more information
                            # https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ
                            line = regex.sub('', line).strip()
                        if line:
                            if not self.process_test_output(line, predictions, 'many'):
                                self.logger.warning('%s classify many task unrecognized input: %s' % (self.get_framework_id(), line.strip()))
                                unrecognized_output.append(line)
                        else:
                            time.sleep(0.05)
            except Exception as e:
                if p.poll() is None:
                    p.terminate()
                error_message = ''
                if type(e) == digits.inference.errors.InferenceError:
                    error_message = e.__str__()
                else:
                    error_message = '%s classify many task failed with error code %d \n %s' % (self.get_framework_id(), p.returncode, str(e))
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.inference.errors.InferenceError(error_message)

            if p.returncode != 0:
                error_message = '%s classify many task failed with error code %d' % (self.get_framework_id(), p.returncode)
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.inference.errors.InferenceError(error_message)
            else:
                self.logger.info('%s classify many task completed.' % self.get_framework_id())
        finally:
            shutil.rmtree(temp_dir_path)

        # task.infer_one() expects dictionary in return value
        return {'output': np.array(predictions)}
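
The color-code regex used in the Torch examples strips ANSI escape sequences of the form ESC[...m that the 'th' binary wraps around its output. In isolation (the sample line is made up):

import re

regex = re.compile(r'\x1b\[[0-9;]*m', re.UNICODE)
line = '\x1b[0;32mAccuracy: 0.98\x1b[0m\n'
print regex.sub('', line).strip()  # -> Accuracy: 0.98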
Example #15
    def infer_many_images(self, images, snapshot_epoch=None, gpu=None):
        """
        Returns (labels, results):
        labels -- an array of strings
        results -- a 2D np array:
            [
                [image0_label0_confidence, image0_label1_confidence, ...],
                [image1_label0_confidence, image1_label1_confidence, ...],
                ...
            ]

        Arguments:
        images -- a list of np.arrays

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        """

        # create a temporary folder to store images and a temporary file
        # to store a list of paths to the images
        temp_dir_path = tempfile.mkdtemp(suffix='.tmp')
        try:  # this try...finally clause is used to clean up the temp directory in any case
            maxn = len(images) - 1
            w = len(str(maxn))
            for i, image in enumerate(images):
                filename = (('%0' + str(w) + 'd') % i) + '.png'
                print filename
                imsave(os.path.join(temp_dir_path, filename), image)

            file_to_load = self.get_snapshot(snapshot_epoch)

            if 'TASK_WRAPPER' in os.environ:
                executable = os.environ['TASK_WRAPPER']
            else:
                executable = sys.executable

            args = [
                executable,
                os.path.join(os.path.dirname(os.path.abspath(digits.__file__)),
                             'tools', 'deepstacks', 'main.py'),
                '--snapshotFromZero=1',
                '--digits=1',
                '--testMany=1',
                '--allPredictions=1',  # all predictions are grabbed and formatted as required by DIGITS
                '--inference_db=%s' % str(temp_dir_path),
                '--network=%s' % self.model_file,
                '--networkDirectory=%s' % self.job_dir,
                '--weights=%s' % file_to_load,
                '--batch_size=1',  # deepstacks only support 1 now
            ]

            if hasattr(self.dataset, 'labels_file'):
                args.append('--labels_list=%s' %
                            self.dataset.path(self.dataset.labels_file))

            if self.use_mean != 'none':
                mean_file = self.dataset.get_mean_file()
                assert mean_file is not None, 'Failed to retrieve mean file.'
                args.append('--mean=%s' % self.dataset.path(mean_file))

            if self.use_mean == 'pixel':
                args.append('--subtractMean=pixel')
            elif self.use_mean == 'image':
                args.append('--subtractMean=image')
            else:
                args.append('--subtractMean=none')
            if self.crop_size:
                args.append('--croplen=%d' % self.crop_size)

            # Convert them all to strings
            args = [str(x) for x in args]

            self.logger.info('%s classify many task started.' % self.name())

            self.logger.info('Task subprocess args: "%s"' % ' '.join(args))

            env = os.environ.copy()
            # if gpu is not None:
            #     # make only the selected GPU visible
            #     env['CUDA_VISIBLE_DEVICES'] = subprocess_visible_devices([gpu])

            unrecognized_output = []
            predictions = []
            p = subprocess.Popen(args,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 cwd=self.job_dir,
                                 close_fds=True,
                                 env=env)

            try:
                while p.poll() is None:
                    for line in utils.nonblocking_readlines(p.stdout):
                        if self.aborted.is_set():
                            p.terminate()
                            raise digits.inference.errors.InferenceError(
                                '%s classify many task got aborted. '
                                'error code - %d' %
                                (self.get_framework_id(), p.returncode))

                        if line is not None and len(line) > 1:
                            if not self.process_test_output(
                                    line, predictions, 'many'):
                                self.logger.warning(
                                    '%s classify many task unrecognized input: %s'
                                    % (self.get_framework_id(), line.strip()))
                                unrecognized_output.append(line)
                        else:
                            time.sleep(0.05)
            except Exception as e:
                if p.poll() is None:
                    p.terminate()
                error_message = ''
                if type(e) == digits.inference.errors.InferenceError:
                    error_message = e.__str__()
                else:
                    error_message = '%s classify many task failed with error code %d \n %s' % (
                        self.get_framework_id(), p.returncode, str(e))
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.inference.errors.InferenceError(error_message)

            if p.returncode != 0:
                error_message = '%s classify many task failed with error code %d' % (
                    self.get_framework_id(), p.returncode)
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = '\n'.join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.inference.errors.InferenceError(error_message)
            else:
                self.logger.info('%s classify many task completed.' %
                                 self.get_framework_id())
        finally:
            shutil.rmtree(temp_dir_path)

        # task.infer_one() expects dictionary in return value
        return {'output': np.array(predictions)}
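
The zero-padded filenames written above keep the inference_db images sorted in input order; the pad width is derived from the largest index. A standalone sketch with a made-up count:

n_images = 12  # hypothetical
w = len(str(n_images - 1))
for i in range(n_images):
    print (('%0' + str(w) + 'd') % i) + '.png'  # 00.png, 01.png, ..., 11.png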
Example #16
    def get_network_visualization(self, desc):
        """
        return visualization of network
        """
        # save network description to temporary file
        temp_network_handle, temp_network_path = tempfile.mkstemp(suffix='.py')
        os.write(temp_network_handle, desc)
        os.close(temp_network_handle)

        try:  # do this in a try..finally clause to make sure we delete the temp file
            # build command line
            mxnet_bin = config_value('mxnet')['executable']

            args = [mxnet_bin,
                    os.path.join(os.path.dirname(digits.__file__), 'tools', 'mxnet', 'train'),
                    '--network=%s' % os.path.splitext(os.path.basename(temp_network_path))[0],
                    '--networkDirectory=%s' % os.path.dirname(temp_network_path),
                    '--subtractMean=none',  # we are not providing a mean image
                    '--visualizeModel=yes',
                    '--type=float'
                    ]

            # execute command
            p = subprocess.Popen(args,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 close_fds=True,
                                 )

            # TODO: need to include a regular expression for Mac color codes
            regex = re.compile(r'\x1b\[[0-9;]*m', re.UNICODE)

            # the network description will be accumulated from the command output
            # when collecting_net_definition==True
            collecting_net_definition = False
            desc = []
            unrecognized_output = []
            while p.poll() is None:
                for line in utils.nonblocking_readlines(p.stdout):
                    if line is not None:
                        # Remove whitespace and color codes.
                        # Color codes are appended to the beginning and end of each line by the
                        # underlying binary. This comment and regex were carried over from the
                        # Torch task (where the 'th' binary emits them); see the link below
                        # https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ  # noqa
                        line = regex.sub('', line)
                        timestamp, level, message = MxnetTrainTask.preprocess_output_mxnet(line.strip())
                        if message:
                            if message.startswith('Network definition'):
                                collecting_net_definition = not collecting_net_definition
                        else:
                            if collecting_net_definition:
                                desc.append(line)
                            elif len(line):
                                unrecognized_output.append(line)
                    else:
                        time.sleep(0.05)

            if not len(desc):
                # we did not find a network description
                raise NetworkVisualizationError(''.join(unrecognized_output))
            else:
                output = flask.Markup('<pre>')
                for line in desc:
                    output += flask.Markup.escape(line)
                output += flask.Markup('</pre>')
                return output
        finally:
            os.remove(temp_network_path)
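
The capture loop above flips collecting_net_definition each time a line whose parsed message starts with 'Network definition' appears, so only the lines between the two marker lines are kept. The same toggle on made-up output:

collecting = False
desc = []
for line in ['startup noise', 'Network definition (begin)', 'layer1\n',
             'layer2\n', 'Network definition (end)', 'more noise']:  # hypothetical
    if line.startswith('Network definition'):
        collecting = not collecting
    elif collecting:
        desc.append(line)
print ''.join(desc)  # -> layer1 and layer2 on separate lines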
Example #17
    def classify_many(self, images, snapshot_epoch=None):
        """
        Returns (labels, results):
        labels -- an array of strings
        results -- a 2D np array:
            [
                [image0_label0_confidence, image0_label1_confidence, ...],
                [image1_label0_confidence, image1_label1_confidence, ...],
                ...
            ]

        Arguments:
        images -- a list of np.arrays

        Keyword arguments:
        snapshot_epoch -- which snapshot to use
        """

        # create a temporary folder to store images and a temporary file
        # to store a list of paths to the images
        temp_dir_path = tempfile.mkdtemp()
        try:  # this try...finally clause is used to clean up the temp directory in any case
            temp_imglist_handle, temp_imglist_path = tempfile.mkstemp(dir=temp_dir_path, suffix=".txt")
            for image in images:
                temp_image_handle, temp_image_path = tempfile.mkstemp(dir=temp_dir_path, suffix=".jpeg")
                image = PIL.Image.fromarray(image)
                try:
                    image.save(temp_image_path, format="jpeg")
                except KeyError:
                    error_message = 'Unable to save file to "%s"' % temp_image_path
                    self.logger.error(error_message)
                    raise digits.frameworks.errors.InferenceError(error_message)
                os.write(temp_imglist_handle, "%s\n" % temp_image_path)
                os.close(temp_image_handle)
            os.close(temp_imglist_handle)

            if config_value("torch_root") == "<PATHS>":
                torch_bin = "th"
            else:
                torch_bin = os.path.join(config_value("torch_root"), "bin", "th")

            args = [
                torch_bin,
                os.path.join(os.path.dirname(os.path.dirname(digits.__file__)), "tools", "torch", "wrapper.lua"),
                "test.lua",
                "--testMany=yes",
                "--allPredictions=yes",  # all predictions are grabbed and formatted as required by DIGITS
                "--image=%s" % str(temp_imglist_path),
                "--resizeMode=%s"
                % str(
                    self.dataset.resize_mode
                ),  # Here, we are using original images, so they will be resized in Torch code. This logic needs to be changed to eliminate the rework of resizing. Need to find a way to send python images array to Lua script efficiently
                "--network=%s" % self.model_file.split(".")[0],
                "--networkDirectory=%s" % self.job_dir,
                "--load=%s" % self.job_dir,
                "--snapshotPrefix=%s" % self.snapshot_prefix,
            ]

            if isinstance(self.dataset, ImageClassificationDatasetJob):
                # TODO: we probably do not need to return this, since the calling function can access the labels directly
                labels = self.get_labels()
                args.append("--labels=%s" % self.dataset.path(self.dataset.labels_file))
                args.append("--mean=%s" % self.dataset.path(constants.MEAN_FILE_IMAGE))
            elif isinstance(self.dataset, dataset.GenericImageDatasetJob):
                if self.use_mean != "none":
                    args.append("--mean=%s" % os.path.join(self.job_dir, constants.MEAN_FILE_IMAGE))

            if snapshot_epoch:
                args.append("--epoch=%d" % int(snapshot_epoch))
            if self.trained_on_cpu:
                args.append("--type=float")
            else:
                args.append("--type=cuda")

            if self.use_mean == "pixel":
                args.append("--subtractMean=pixel")
            elif self.use_mean == "image":
                args.append("--subtractMean=image")
            else:
                args.append("--subtractMean=none")

            if self.crop_size:
                args.append("--crop=yes")
                args.append("--croplen=%d" % self.crop_size)

            # Convert them all to strings
            args = [str(x) for x in args]

            regex = re.compile(r"\x1b\[[0-9;]*m", re.UNICODE)  # TODO: need to include a regular expression for Mac color codes
            self.logger.info("%s classify many task started." % self.name())

            # make only the first GPU visible
            env = os.environ.copy()
            env["CUDA_VISIBLE_DEVICES"] = "0"

            unrecognized_output = []
            predictions = []
            p = subprocess.Popen(
                args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.job_dir, close_fds=True, env=env
            )

            try:
                while p.poll() is None:
                    for line in utils.nonblocking_readlines(p.stdout):
                        if self.aborted.is_set():
                            p.terminate()
                            raise digits.frameworks.errors.InferenceError(
                                "%s classify many task got aborted. error code - %d"
                                % (self.get_framework_id(), p.returncode)
                            )

                        if line is not None:
                            # Remove whitespace and color codes. Color codes are appended to the beginning and end of each line by the torch binary, i.e. 'th'. Check the link below for more information
                            # https://groups.google.com/forum/#!searchin/torch7/color$20codes/torch7/8O_0lSgSzuA/Ih6wYg9fgcwJ
                            line = regex.sub("", line).strip()
                        if line:
                            if not self.process_test_output(line, predictions, "many"):
                                self.logger.warning(
                                    "%s classify many task unrecognized input: %s"
                                    % (self.get_framework_id(), line.strip())
                                )
                                unrecognized_output.append(line)
                        else:
                            time.sleep(0.05)
            except Exception as e:
                if p.poll() is None:
                    p.terminate()
                error_message = ""
                if type(e) == digits.frameworks.errors.InferenceError:
                    error_message = e.__str__()
                else:
                    error_message = "%s classify many task failed with error code %d \n %s" % (
                        self.get_framework_id(),
                        p.returncode,
                        str(e),
                    )
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = "\n".join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.frameworks.errors.InferenceError(error_message)

            if p.returncode != 0:
                error_message = "%s classify many task failed with error code %d" % (
                    self.get_framework_id(),
                    p.returncode,
                )
                self.logger.error(error_message)
                if unrecognized_output:
                    unrecognized_output = "\n".join(unrecognized_output)
                    error_message = error_message + unrecognized_output
                raise digits.frameworks.errors.InferenceError(error_message)
            else:
                self.logger.info("%s classify many task completed." % self.get_framework_id())
        finally:
            shutil.rmtree(temp_dir_path)

        if isinstance(self.dataset, dataset.GenericImageDatasetJob):
            # task.infer_one() expects dictionary in return value
            return {"output": np.array(predictions)}
        else:
            return (labels, np.array(predictions))
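
PIL raises KeyError when image.save is given a format string it has no encoder registered for, which is why the save calls in these examples catch KeyError specifically. A quick check (the path and format are made up):

import numpy as np
import PIL.Image

image = PIL.Image.fromarray(np.zeros((8, 8), dtype='uint8'))
try:
    image.save('/tmp/example.xyz', format='xyz')
except KeyError:
    print 'PIL has no encoder registered for format "xyz"'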