Example 1
 def create(self, arguments={}, invert=False):
     from maskgen.zip_tools import AudioPositions
     from maskgen.tool_set import getMilliSecondsAndFrameCount
     from maskgen.video_tools import FileMetaDataLocator, \
         create_segment
     fps = float(getValue(arguments, 'sample rate', 0))
     # use AudioPostions to determine duration and rate
     positions = AudioPositions(FileMetaDataLocator(
         self.startFileName).get_filename(),
                                fps=fps)
     duration = positions.get_total_duration()
     rate = positions.fps
     end_time_tuple = getMilliSecondsAndFrameCount(
         getValue(arguments, 'End Time', "00:00:00"))
     start_time_tuple = getMilliSecondsAndFrameCount(
         getValue(arguments, 'Start Time', '00:00:00'))
     if end_time_tuple[0] <= start_time_tuple[0]:
         end_time_tuple = (duration, 0)
     return [
         create_segment(
             starttime=float(start_time_tuple[0]),
             startframe=int(start_time_tuple[0] * rate / 1000.0) + 1,
             endtime=float(end_time_tuple[0]),
             endframe=int(end_time_tuple[0] * rate / 1000.0) + 1,
             type='audio',
             rate=rate)
     ]
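A quick sanity check of the frame arithmetic above, using hypothetical values: for a 'Start Time' of 00:00:02.000 and a 44100 Hz audio stream, startframe = int(2000 * 44100 / 1000.0) + 1 = 88201, i.e. the +1 keeps the segment's frame indices 1-based.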
Example 2
    def getChangeInFrames(self,
                          edge,
                          meta_i,
                          meta_o,
                          source,
                          target,
                          expectedType='video'):

        if meta_i is None or meta_o is None:
            result = get_meta_data_change_from_edge(edge,
                                                    expectedType=expectedType)
            if result is not None:
                return result
        else:
            change = getValue(meta_i, 'duration', None) != getValue(meta_o, 'duration', None) or \
                     getValue(meta_i, 'nb_frames', None) != getValue(meta_o, 'nb_frames', None) or \
                     getValue(meta_i, 'sample_rate', None) != getValue(meta_o, 'sample_rate', None) or \
                     getValue(meta_i, 'avg_frame_rate', None) != getValue(meta_o, 'avg_frame_rate', None) or \
                     getValue(meta_i, 'duration_ts', None) != getValue(meta_o, 'duration_ts', None)

            if not change:
                return None

        maskSource = self.getMetaDataLocator(
            source).getMaskSetForEntireVideoForTuples(
                media_types=[expectedType])
        maskTarget = self.getMetaDataLocator(
            target).getMaskSetForEntireVideoForTuples(
                media_types=[expectedType])
        return get_frames_from_segment(maskSource[0]), get_end_time_from_segment(maskSource[0]), \
               get_frames_from_segment(maskTarget[0]), get_end_time_from_segment(maskTarget[0]), \
               get_rate_from_segment(maskSource[0]), get_rate_from_segment(maskTarget[0])
Example 3
    def getMasksFromEdge(self, source, target, media_types, channel=0, startTime=None, endTime=None):
        """
        Currently prioritizes masks over entered.  This seems appropriate.  Adjust the software to
        produce masks consistent with recorded change.
        :param filename:q
        :param edge:
        :param media_types:
        :param channel:
        :param startTime:
        :param endTime:
        :return:
        """

        edge = self.graph.get_edge(source, target)
        if 'videomasks' in edge and \
                        edge['videomasks'] is not None and \
                        len(edge['videomasks']) > 0:
            return [mask for mask in edge['videomasks'] if mask['type'] in media_types]
        else:
            result = getMaskSetForEntireVideo(self.getMetaDataLocator(source),
                                              start_time=getValue(edge, 'arguments.Start Time',
                                                                  defaultValue='00:00:00.000')
                                              if startTime is None else startTime,
                                              end_time=getValue(edge, 'arguments.End Time')
                                              if endTime is None else endTime,
                                              media_types=media_types,
                                              channel=channel)
            if result is None or len(result) == 0:
                return None
        return result
Example 4
def transform(img, source, target, **kwargs):
    img = np.asarray(img)
    sizeSource = img.shape
    percentageWidth = float(kwargs['percentage_width'])
    percentageHeight = float(kwargs['percentage_height'])
    pixelWidth = int(sizeSource[1] * percentageWidth)
    pixelHeight = int(sizeSource[0] * percentageHeight)
    keepSize = 'keepSize' in kwargs and kwargs['keepSize'] == 'yes'
    if not keepSize:
        if percentageWidth != 1.0:
            pixelWidth = pixelWidth - pixelWidth % 4
        if percentageHeight != 1.0:
            pixelHeight = pixelHeight - pixelHeight % 4
    sizeDonor = (pixelHeight, pixelWidth)
    return {
        'output_files':
        carveSeams(
            source,
            target,
            sizeDonor,
            kwargs['inputmaskname'] if 'inputmaskname' in kwargs else None,
            keep_size=not keepSize,
            approach=getValue(kwargs, 'approach', defaultValue="backward"),
            energy=getValue(kwargs, 'energy', defaultValue="Sobel"))
    }, None
Example 5
    def get_video_orientation_change(self, source, target):
        source_data = self.getVideoMeta(source, show_streams=True)[0]
        donor_data = self.getVideoMeta(target, show_streams=True)[0]

        source_channel_data = source_data[ffmpeg_api.get_stream_indices_of_type(source_data, 'video')[0]]
        target_channel_data = donor_data[ffmpeg_api.get_stream_indices_of_type(donor_data, 'video')[0]]

        return int(getValue(target_channel_data, 'rotation', 0)) - int(getValue(source_channel_data, 'rotation', 0))
Example 6
 def _getTriggerUpdateArguments(self):
     names = set()
     for k, v in self.mandatoryparameters.iteritems():
         if getValue(v, 'trigger mask', False):
             names.add(k)
     for k, v in self.optionalparameters.iteritems():
         if getValue(v, 'trigger mask', False):
             names.add(k)
     return names
Example 7
 def arguments(self):
     args = self._base_arguments()
     edge = self.graph.get_edge(self.donor_start, self.donor_end)
     args['Start Time']['defaultvalue'] = getValue(edge,
                                                   'arguments.Start Time',
                                                   "1")
     end_def = getValue(edge, 'arguments.End Time', None)
     if end_def is not None:
         args['End Time']['defaultvalue'] = end_def
     return args
Example 8
def to_operations_md(place='.'):
    with open(os.path.join(place, 'operations.md'), 'w') as fp:
        for category, operations in operations_by_category().iteritems():
            write_header(fp, category, 1)
            for operation_name in operations:
                logging.getLogger('maskgen').info('Generating %s' %
                                                  operation_name)
                operation = getOperation(operation_name)
                write_header(
                    fp,
                    operation_name,
                    2,
                    status='deprecated' if operation.deprecated else None)
                write_text(fp, operation.description)
                default_inc_mask = getValue(operation.includeInMask, 'default')
                mask_included = {
                    file_type: getValue(operation.includeInMask, file_type,
                                        default_inc_mask)
                    for file_type in ['video', 'audio', 'image']
                }
                mask_included = [x for x, y in mask_included.iteritems() if y]
                fp.write('\nInclude as Probe Mask for [%s]\n' %
                         ', '.join(mask_included))
                write_header(fp, 'Mandatory Parameters', 3)
                for key, definition in operation.mandatoryparameters.iteritems(
                ):
                    write_parameter(fp, key, definition)
                write_header(fp, 'Optional Parameters', 3)
                for key, definition in operation.optionalparameters.iteritems(
                ):
                    write_parameter(fp, key, definition)
                write_header(fp, 'Validation Rules', 3)
                for rule in operation.rules:
                    write_graph_rule(fp, rule)
                write_header(fp, 'Allowed Transitions', 3)
                for rule in operation.transitions:
                    write_bullet(fp, rule)
                write_header(fp, 'Probe Generation Rules', 3)
                if operation.maskTransformFunction is None:
                    write_text(
                        fp,
                        "*Default*: resize, rotate, crop, and transform where applicable"
                    )
                else:
                    for media, rule in operation.maskTransformFunction.iteritems(
                    ):
                        write_mask_rule(fp, media, rule)
                write_header(fp, 'QA Questions', 3)
                if operation.qaList is not None:
                    for rule in operation.qaList:
                        write_bullet(fp, rule)
                write_header(fp, 'Analysis Rules', 3)
                for rule in operation.analysisOperations:
                    write_analysis(fp, rule)
Example 9
 def arguments(self):
     args = self._base_arguments()
     predecessors = self.graph.predecessors(self.donor_start)
     for pred in predecessors:
         edge = self.graph.get_edge(pred, self.donor_start)
         if edge['op'].startswith('Select'):
             args['Start Time']['defaultvalue'] = getValue(
                 edge, 'arguments.Start Time', "1")
             end_def = getValue(edge, 'arguments.End Time', None)
             if end_def is not None:
                 args['End Time']['defaultvalue'] = end_def
     return args
Example 10
 def create(self, arguments={}, invert=False):
     from maskgen.video_tools import getMaskSetForEntireVideoForTuples, FileMetaDataLocator
     from maskgen.tool_set import getMilliSecondsAndFrameCount
     end_time_tuple = getMilliSecondsAndFrameCount(
         getValue(arguments, 'End Time', "00:00:00"))
     start_time_tuple = getMilliSecondsAndFrameCount(
         getValue(arguments, 'Start Time', '00:00:00'))
     return getMaskSetForEntireVideoForTuples(
         FileMetaDataLocator(self.startFileName),
         start_time_tuple=start_time_tuple,
         end_time_tuple=end_time_tuple if end_time_tuple[0] > 0 else None,
         media_types=self.media_types())
Example 11
def check_mandatory(edge, opInfo, graph, frm, to):
    """
    Check mandatory parameters.
    Check optional parameters condition on mandatory parameter values.
    'inputmaskname' is treated special since it has historically been placed outside the arguments list.
    :param edge:
    :param opInfo:
    :param graph:
    :param frm:
    :param to:
    :return: list of (Severity, str)
    @type edge: dict
    @type opInfo: Operation
    @type graph: ImageGraph
    @type frm: str
    @type to: str
    @rtype:  list of (Severity, str)
    """
    if opInfo.name == 'Donor':
        return []
    if opInfo.category == 'Bad':
        return [
            ValidationMessage(Severity.ERROR, frm, to, opInfo.name +
                              ' is not a valid operation', 'Mandatory', None)
        ] if opInfo.name != 'Donor' else []
    args = edge['arguments'] if 'arguments' in edge else []
    frm_file = graph.get_image(frm)[1]
    frm_file_type = fileType(frm_file)
    missing = [param for param in opInfo.mandatoryparameters.keys() if
               (param not in args or len(str(args[param])) == 0) and \
               ('source' not in opInfo.mandatoryparameters[param] or opInfo.mandatoryparameters[param][
                   'source'] == frm_file_type)]
    for param_name, param_definition in opInfo.optionalparameters.iteritems():
        if 'rule' in param_definition:
            if param_name not in args:
                for dep_param_name, dep_param_values in param_definition[
                        'rule'].iteritems():
                    if len(dep_param_values) > 0 and \
                         getValue(args, dep_param_name, defaultValue=dep_param_values[0]) in dep_param_values:
                        missing.append(param_name)
    missing = set(missing)
    inputmaskrequired = 'inputmaskname' in missing
    if inputmaskrequired:
        filename = getValue(edge, 'inputmaskname', defaultValue='')
        if len(filename) > 0 and os.path.exists(
                os.path.join(graph.dir, filename)):
            missing.remove('inputmaskname')
    return [
        ValidationMessage(Severity.ERROR, frm, to,
                          'Mandatory parameter ' + m + ' is missing',
                          'Mandatory', None) for m in missing
    ]
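To make the conditional-rule check above concrete with a hypothetical definition (not taken from the actual operation dictionary): an optional parameter declared with {'rule': {'purpose': ['clone', 'heal']}} is treated as mandatory whenever the edge's 'purpose' argument is 'clone' or 'heal'; if 'purpose' is absent, getValue falls back to the first listed value ('clone'), which is in the list, so the dependent parameter is still flagged as missing.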
Example 12
def check_arguments(edge, op, graph, frm, to):
    """
    Check operation arguments are in the correct type format
    :param edge:
    :param op:
    :param graph:
    :param frm:
    :param to:
    :return:
    @type edge: dict
    @type op: Operation
    @type graph: ImageGraph
    @type frm: str
    @type to: str
    @rtype: list of (Severity, str)
    """
    if op.name == 'Donor':
        return []
    args = [(k, v) for k, v in op.mandatoryparameters.iteritems()]
    args.extend([(k, v) for k, v in op.optionalparameters.iteritems()])
    results = []
    for argName, argDef in args:
        try:
            argValue = getValue(edge, 'arguments.' + argName)
            if argValue:
                validateAndConvertTypedValue(argName, argValue, op)
        except ValueError as e:
            results.append(
                ValidationMessage(Severity.ERROR, frm, to, argName + str(e),
                                  'Argument {}'.format(argName), None))
    return results
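Several of these examples hand getValue a dot-separated key such as 'arguments.Start Time' or 'metadatadiff.video.nb_frames'. The snippets never show maskgen.support.getValue itself, so the following is only an illustrative sketch of a nested lookup with a default, consistent with how it is called above:

def get_value_sketch(container, key, defaultValue=None):
    # Illustrative only: walk a dot-separated key through nested dicts,
    # returning defaultValue as soon as a level is missing.
    current = container
    for part in key.split('.'):
        if not isinstance(current, dict) or part not in current:
            return defaultValue
        current = current[part]
    return current

# e.g. get_value_sketch(edge, 'arguments.Start Time', defaultValue='00:00:00')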
Example 13
 def create(self, arguments={}, invert=False):
     import numpy as np
     if getValue(arguments, 'homography', 'None') == 'None':
         if self.startIm.has_alpha():
             img_array = np.asarray(self.startIm)
             mask = np.copy(img_array[:, :, 3])
             #accept the alpha channel as what is kept
             mask[mask > 0] = 255
             #invert since 0 in the donor mask indicates the donor pixels
             return ImageWrapper(mask).invert()
         # use the pre select mask (inverted) as the selection...invert what was removed to be what is kept
         return _pre_select_mask(self.graph, self.donor_start, self.startIm)
     mask = self.graph.get_edge_image(self.parent_of_end, self.donor_end,
                                      'arguments.pastemask')
     if mask is None:
         mask = self.graph.get_edge_image(self.parent_of_end,
                                          self.donor_end, 'maskname')
     mask, analysis = interpolateMask(mask,
                                      self.startIm,
                                      self.destIm,
                                      arguments=arguments,
                                      invert=invert)
     if mask is not None and mask.shape != (0, 0):
         mask = ImageWrapper(mask)
     else:
         mask = None
     return mask
Example 14
def rerunexif(project):
    """
    Save error report, project properties, composites, and donors
    :param sm: scenario model
    """
    sm = scenario_model.ImageProjectModel(project)
    plugin_map = {
        'AntiForensicExifQuantizationTable': 'CompressAs',
        'AntiForensicCopyExif': 'ExifMetaCopy',
        'AntiForensicEditExif': 'ExifGPSChange',
        'OutputTif': 'TIFF'
    }
    for edge_id in sm.getGraph().get_edges():
        edge = sm.getGraph().get_edge(edge_id[0], edge_id[1])
        # if a compression type operation
        if edge['op'] in [
                'AntiForensicExifQuantizationTable', 'AntiForensicCopyExif',
                'AntiForensicEditExif', 'OutputTif'
        ]:
            # has donor
            preds = [
                pred for pred in sm.getGraph().predecessors(edge_id[1])
                if pred != edge_id[0]
            ]
            if len(preds) > 0:
                donor_node = sm.getGraph().get_node(preds[0])
                target_node = sm.getGraph().get_node(edge_id[1])
                im, source_filename = sm.getImageAndName(edge_id[0])
                target_filename = os.path.join(sm.get_dir(),
                                               target_node['file'])
                plugin_name = plugin_map[edge['op']]
                kwargs = {
                    'donor': os.path.join(sm.get_dir(), donor_node['file']),
                    'rotate': 'yes'
                }
                doc = getValue(edge, 'arguments.degrees of change')
                if doc is not None:
                    kwargs['degrees of change'] = doc
                if plugin_name == 'TIFF':
                    exif.runexif([
                        '-P', '-q', '-m', '-TagsFromFile', donor_node['file'],
                        '-all:all', '-unsafe', target_filename
                    ])
                    createtime = exif.getexif(
                        target_filename,
                        args=['-args', '-System:FileCreateDate'],
                        separator='=')
                    if createtime is not None and '-FileCreateDate' in createtime:
                        exif.runexif([
                            '-overwrite_original', '-P', '-q', '-m',
                            '-System:fileModifyDate=' +
                            createtime['-FileCreateDate'], target_filename
                        ])
                else:
                    plugins.callPlugin(plugin_name, im, source_filename,
                                       target_filename, **kwargs)
            sm.reproduceMask(edge_id=edge_id)
    sm.save()
Example 15
    def check_compare_mask(edge, op, graph, frm, to):
        if op.generateMask in ['audio','meta','frames'] or \
                (graph.getNodeFileType(frm) not in ['video','image'] and \
                graph.getNodeFileType(to) not in ['video', 'image']):
            return []
        if len(getValue(edge, 'videomasks', [])) > 0:
            return []

        if getValue(edge,'maskname') is None or \
           getValue(edge,'maskname','') == '' or \
              not os.path.exists(os.path.join(graph.dir, edge['maskname'])):
            return [
                ValidationMessage(
                    Severity.ERROR, frm, to,
                    'Link mask is missing. Recompute the link mask.',
                    'Change Mask', None)
            ]
        return []
Example 16
 def test_remap2(self):
     network = {
         "directed":
         True,
         "graph": {
             "username": "******",
         },
         "nodes": [{
             "id": "A",
             "fooA": "barA"
         }, {
             "id": "B",
             "fooB": "barB"
         }, {
             "id": "C",
             "fooC": "barC"
         }, {
             "id": "D",
             "fooD": "barD",
             'source': 'A'
         }, {
             "id": "E",
             "fooE": "barE"
         }],
         "links": [{
             "source": "A",
             "target": "B"
         }, {
             "source": "A",
             "target": "D"
         }, {
             "source": "B",
             "target": "C"
         }, {
             "source": "C",
             "target": "D",
             "split": True
         }, {
             "source": "D",
             "target": "E",
             "foo": "bar"
         }],
         "multigraph":
         False
     }
     remapped = batch_project.remap_links(network)
     G = json_graph.node_link_graph(remapped,
                                    multigraph=False,
                                    directed=True)
     G = batch_project.separate_paths(G)
     for node_id in G.nodes():
         preds = [
             pred for pred in G.predecessors(node_id)
             if not getValue(G[pred][node_id], 'donor', False)
         ]
         self.assertTrue(len(preds) < 2)
     self.assertEqual(7, len(G.nodes()))
Example 17
def operations_by_category():
    result = {}
    for name, op in getOperations().iteritems():
        data = getValue(result, op.category, [])
        data.append(op.name)
        result[op.category] = data

    for category in result.keys():
        result[category] = sorted(result[category])
    return result
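For comparison, the same grouping could be written as result.setdefault(op.category, []).append(op.name); the getValue form above just keeps the default-value lookup style used throughout these examples.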
Example 18
    def load_image_json(self):
        fpath = getFileName(os.path.join("help", "image_linker.json"))

        with open(fpath) as f:
            self.linker = json.load(f)

        for key in self.linker.keys():
            for subkey in self.linker[key]:
                imgs = getValue(self.linker[key][subkey], 'images', []) #Get the raw list
                [self.extend_with_path_values(imgs, img) for img in imgs] #extend the list where applicable
                imgs = filter(None, map(self.try_get_slide, imgs)) #Get the paths and filter out missing/invalid
                self.linker[key][subkey]['images'] = imgs #replace value
Example 19
 def test_drop_then_add(self):
     filename = self.locateFile('tests/videos/sample1.mov')
     filename_output1 = os.path.join(
         os.path.dirname(os.path.abspath(filename)), 'sample_out1a.avi')
     kwargs = {
         'Start Time': 100,
         'seconds to drop': 2,
         'codec': 'XVID',
         'save histograms': 'yes'
     }
     args, error = plugins.callPlugin('FlowDrivenVideoFrameDrop', None,
                                      filename, filename_output1, **kwargs)
     self.filesToKill.append(filename_output1)
     self.assertTrue(error is None)
     frames1 = int(
         get_channel_data(
             ffmpeg_api.get_meta_from_video(filename, show_streams=True)[0],
             'video')[0]['nb_frames'])
     frames2 = int(
         get_channel_data(
             ffmpeg_api.get_meta_from_video(filename_output1,
                                            show_streams=True)[0],
             'video')[0]['nb_frames'])
     diff = frames1 - frames2
     self.assertTrue(diff > 0)
     diff_time = int(args['End Time']) - int(args['Start Time']) + 1
     self.assertEqual(diff, diff_time)
     filename_output2 = os.path.join(
         os.path.dirname(os.path.abspath(filename)), 'sample_out2a.avi')
     args['codec'] = 'XVID'
     if getValue(args, 'Frames to Add', 0) < 1:
         args['Frames to Add'] = 1
     print str(args)
     args, error = plugins.callPlugin('FlowDrivenVideoTimeWarp', None,
                                      filename_output1, filename_output2,
                                      **args)
     self.filesToKill.append(filename_output2)
     self.assertTrue(error is None)
     frames1 = int(
         get_channel_data(
             ffmpeg_api.get_meta_from_video(filename_output1,
                                            show_streams=True)[0],
             'video')[0]['nb_frames'])
     frames2 = int(
         get_channel_data(
             ffmpeg_api.get_meta_from_video(filename_output2,
                                            show_streams=True)[0],
             'video')[0]['nb_frames'])
     diff = frames2 - frames1
     self.assertTrue(diff > 0)
     diff_time = int(args['End Time']) - int(args['Start Time']) + 1
     print str(args)
     self.assertEqual(diff, diff_time)
Example 20
def transform(img, source, target, **kwargs):
    # source = zip of images
    if 'Registration Type' in kwargs:
        reg_type = kwargs['Registration Type']
    else:
        reg_type = 'ECC'
    zipf = ZipCapture(source)
    imgs = []
    logger = logging.getLogger("maskgen")

    retrieved, zip_image = zipf.read()
    if not retrieved:
        raise ValueError("Zip File {0} is empty".format(
            os.path.basename(source)))

    registrar = {
        'ECC': OpenCVECCRegistration(os.path.join(zipf.dir, zipf.names[0]))
    }
    reg_tool = registrar[reg_type]

    if 'Image Rotated' in kwargs and kwargs['Image Rotated'] == 'yes':
        try:
            orientation = getValue(zipf.get_exif(), 'Orientation', None)
        except KeyError:
            orientation = None
    else:
        orientation = None

    logger.debug("Beginning image alignment for " + os.path.basename(source))
    while retrieved:
        aligned = reg_tool.align(zip_image)
        imgs.append(aligned)
        retrieved, zip_image = zipf.read()
    logger.debug(os.path.basename(source) + " alignment complete")

    if not imgs:
        return None, False

    stacks = np.stack(np.asarray(imgs))
    median_img = np.median(stacks, 0)

    analysis = {'Merge Operation': 'Median Pixel'}
    if orientation is not None:
        analysis.update(exif.rotateAnalysis(orientation))
        median_img = exif.rotateAccordingToExif(median_img,
                                                orientation,
                                                counter=True)

    ImageWrapper(median_img).save(target, format='PNG')
    analysis['Image Rotated'] = 'yes' if 'rotation' in analysis else 'no'

    return analysis, None
Example 21
    def create(self, arguments={}, invert=False):
        from maskgen.tool_set import getMilliSecondsAndFrameCount
        media_types = ['video', 'audio'] if getValue(
            arguments, 'include audio', 'no') == 'yes' else ['video']

        from maskgen.video_tools import FileMetaDataLocator
        end_time_tuple = getMilliSecondsAndFrameCount(
            getValue(arguments, 'End Time', "00:00:00"))
        start_time_tuple = getMilliSecondsAndFrameCount(
            getValue(arguments, 'Start Time', '00:00:00'))
        video_set = FileMetaDataLocator(
            self.startFileName).getMaskSetForEntireVideoForTuples(
                start_time_tuple=start_time_tuple,
                end_time_tuple=end_time_tuple
                if end_time_tuple[1] > start_time_tuple[1] else None,
                media_types=media_types)
        audio_segments = [
            x for x in video_set if get_type_of_segment(x) == 'audio'
        ]
        video_segments = [
            x for x in video_set if get_type_of_segment(x) == 'video'
        ]

        if getValue(arguments, 'include audio', 'no') == 'yes':
            for audio_segment in audio_segments:
                video_segment = video_segments[0] if len(
                    video_segments) > 0 else audio_segment
                update_segment(
                    audio_segment,
                    type='audio',
                    starttime=get_start_time_from_segment(video_segment),
                    endtime=get_end_time_from_segment(video_segment),
                    startframe=int(
                        get_start_time_from_segment(video_segment) *
                        get_rate_from_segment(audio_segment) / 1000.0),
                    endframe=int(
                        get_end_time_from_segment(video_segment) *
                        get_rate_from_segment(audio_segment) / 1000.0) + 1)
        return video_set
Example 22
    def getVideoMetaItem(self, source, attribute, default=None, audio=False):
        """
        Featch meta data, overwriting any keys from the cache in the instance graph's node identified by source.
        :param source: source node id
        :param with_frames:
        :param show_streams:
        :param media_types:
        :return:
        """

        node_meta = self.__get_cache_from_graph(source)
        matched_value = None
        match_codec = 'video' if not audio else 'audio'
        for item in node_meta:
            if getValue(item, 'codec_type') == match_codec and matched_value is None:
                matched_value =  getValue(item,attribute)
        if matched_value is not None:
            return matched_value
        source_file = self.graph.get_image_path(source)
        if fileType(source_file) not in ['audio','video']:
            return default
        return ffmpeg_api.get_frame_attribute(source_file, attribute, default=default, audio=audio)
Example 23
def get_meta_data_change_from_edge(edge, expectedType='video'):
    """
    Inspect edge to see if video meta-data changed such that the frame count is different
    as would be the case in a frame rate change.
    :param edge:
    :param expectedType:
    :return:
    """
    changeFrame = None
    changeDuration = None
    changeRate = None
    if 'metadatadiff' in edge and expectedType == 'video':
        change = getValue(edge, 'metadatadiff.video.nb_frames', ('x', 0, 0))
        changeFrame = change if change[0] == 'change' else None
        change = getValue(edge, 'metadatadiff.video.duration', ('x', 0, 0))
        changeDuration = change if change[0] == 'change' else None
        change = getValue(
            edge, 'metadatadiff.video.r_frame_rate',
            getValue(edge, 'metadatadiff.video.avg_frame_rate', ('x', 0, 0)))
        changeRate = change if change[0] == 'change' else None

    if not changeFrame and not changeDuration:
        return None

    try:
        if changeFrame and changeDuration and changeRate:
            if '/' in str(changeRate[2]):
                parts = changeRate[2].split('/')
                changeRate = float(parts[0]) / float(parts[1])
            else:
                changeRate = float(changeRate[2])
            return int(changeFrame[1]), \
                   float(changeDuration[1]) * 1000.0, \
                   int(changeFrame[2]), \
                   float(changeDuration[2]) * 1000.0, \
                   changeRate
    except:
        pass
    return None
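As a worked example with a hypothetical edge: if metadatadiff.video records nb_frames as ('change', 100, 50), duration as ('change', '4.0', '2.0') and r_frame_rate as ('change', '25', '25/1'), the function returns (100, 4000.0, 50, 2000.0, 25.0), i.e. source frames, source duration in milliseconds, target frames, target duration in milliseconds, and the parsed target frame rate.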
Example 24
def write_parameter(fp, name, definition):
    fp.write('+ *')
    fp.write(name)
    fp.write('* : ')
    fp.write(getValue(definition, 'description', ''))
    fp.write('\n   - Type: %s\n' % definition['type'])
    if getValue(definition, 'source'):
        fp.write('    - Source Type: %s\n' % getValue(definition, 'source'))
    if getValue(definition, 'trigger mask'):
        fp.write('    - Create new Mask on Update\n')
    if getValue(definition, 'defaultvalue'):
        fp.write('    - Default: %s\n' % getValue(definition, 'defaultvalue'))
    if getValue(definition, 'values'):
        fp.write('    - Values: %s\n' %
                 ', '.join(getValue(definition, 'values')))
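For a hypothetical parameter named 'Frames to Add' with definition {'type': 'int', 'description': 'number of frames to add', 'defaultvalue': 10}, write_parameter emits roughly:

+ *Frames to Add* : number of frames to add
   - Type: int
    - Default: 10

(the uneven indentation comes straight from the literal strings in the function).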
Example 25
 def _convertEdge(self, edge):
     """
     :param edge:
     :return: new operation node for the edge
     """
     new_operation_node = {}
     if 'plugin_name' in edge:
         new_operation_node['op_type'] = 'PluginOperation'
         new_operation_node['id'] = self.toID(edge['plugin_name'])
         new_operation_node['plugin'] = edge['plugin_name']
     else:
         new_operation_node['op_type'] = 'PreProcessedMediaOperation'
         new_operation_node['id'] = self.toID(edge['op'])
         op = getOperation(edge['op'], fake=True)
         new_operation_node['category'] = op.category
         new_operation_node['op'] = op.name
         new_operation_node['software'] = edge['softwareName']
         new_operation_node['software version'] = edge['softwareVersion']
         new_operation_node['description'] = edge['description']
         semanticGroups = getValue(edge, 'semanticGroups', None)
         if semanticGroups is not None and len(semanticGroups) > 0:
             new_operation_node['semanticGroups'] = semanticGroups
         new_operation_node[
             'directory'] = "{" + new_operation_node['id'] + "}".replace(
                 ' ', '_')
         if 'recordMaskInComposite' in edge:
             new_operation_node['recordMaskInComposite'] = edge[
                 'recordMaskInComposite']
     arguments = {}
     for k, v in getValue(edge, 'arguments', {}).iteritems():
         if k in ['function', 'video_function', 'audio_function']:
             continue
         value = self._convertArgument(v)
         if value is not None:
             arguments[k] = value
     new_operation_node['arguments'] = arguments
     return new_operation_node
Example 26
    def test_audiozip_zip_link_tool(self):
        from maskgen.scenario_model import ZipAudioLinkTool, AudioZipLinkTool
        from maskgen.software_loader import Operation
        from maskgen.image_wrap import ImageWrapper
        from maskgen.support import getValue
        from maskgen.video_tools import get_end_frame_from_segment, get_frames_from_segment
        import os
        import numpy as np

        def create_zero(h, w):
            return ImageWrapper(np.zeros((h, w), dtype='uint8'))

        vida = self.locateFile('tests/zips/test.wav.zip')
        vidb = self.locateFile('videos/sample1.mov')
        image_values = {
            'a': (create_zero(300, 300), vida),
            'b': (create_zero(300, 300), vidb)
        }

        def get_image(arg):
            return image_values[arg]

        class SillyGraph:
            def get_node(self, name):
                return {'a': {}, 'b': {}}[name]

        tool = ZipAudioLinkTool()
        scModel = Mock()
        scModel.gopLoader = Mock()
        scModel.G.dir = '.'
        scModel.gopLoader.getOperationWithGroups = Mock(
            return_value=Operation(name='test', category='test'))
        scModel.getImageAndName = get_image
        scModel.getGraph = Mock(return_value=SillyGraph())
        mask, analysis, errors = tool.compareImages('a',
                                                    'b',
                                                    scModel,
                                                    'Normalization',
                                                    arguments={},
                                                    analysis_params={})

        self.assertEqual(3, len(analysis['videomasks']))
        x = getValue(analysis, 'metadatadiff.audio.duration')
        x[1] = int(x[1])
        x[2] = int(x[2])
        self.assertEqual(['change', 35665, 59348], x)
        self.assertEqual(
            2617263, get_end_frame_from_segment(analysis['videomasks'][-1]))

        tool = AudioZipLinkTool()
        scModel = Mock()
        scModel.gopLoader = Mock()
        scModel.G.dir = '.'
        scModel.gopLoader.getOperationWithGroups = Mock(
            return_value=Operation(name='test', category='test'))
        scModel.getImageAndName = get_image
        scModel.getGraph = Mock(return_value=SillyGraph())
        mask, analysis, errors = tool.compareImages('b',
                                                    'a',
                                                    scModel,
                                                    'Normalization',
                                                    arguments={},
                                                    analysis_params={})

        self.assertEqual(1, len(analysis['videomasks']))
        x = getValue(analysis, 'metadatadiff.audio.duration')
        x[1] = int(x[1])
        x[2] = int(x[2])
        self.assertEqual(['change', 59348, 35665], x)
        self.assertEqual(
            1572865, get_end_frame_from_segment(analysis['videomasks'][-1]))
        self.assertEqual(1572865,
                         get_frames_from_segment(analysis['videomasks'][-1]))

        mask, analysis, errors = tool.compareImages(
            'b',
            'a',
            scModel,
            'Normalization',
            arguments={'Start Time': '00:00:01.000000'},
            analysis_params={})

        self.assertEqual(1, len(analysis['videomasks']))
        x = getValue(analysis, 'metadatadiff.audio.duration')
        x[1] = int(x[1])
        x[2] = int(x[2])
        self.assertEqual(['change', 59348, 35665], x)
        self.assertEqual(
            1572865, get_end_frame_from_segment(analysis['videomasks'][-1]))
        self.assertEqual(1528766,
                         get_frames_from_segment(analysis['videomasks'][-1]))
Example 27
    def __init__(self, master, link):
        Frame.__init__(self, master=master)
        self.master = master
        self.link = link
        self.checkboxes = CheckboxGroup(boxes=[])
        #Find this probe- could probably do this elsewhere and pass it in.
        self.edgeTuple = tuple(link.split("<-"))
        if len(self.edgeTuple) < 2:
            self.finalNodeName = link.split("->")[1]
            self.edgeTuple = tuple(link.split("->"))
        else:
            self.finalNodeName = None
        if (len(link.split('->')) > 1):
            probe = [
                probe for probe in master.probes
                if probe.edgeId[1] in master.lookup[self.edgeTuple[0]]
                and probe.finalNodeId in master.lookup[self.edgeTuple[1]]
            ][0]
        else:
            probe = [
                probe for probe in master.probes
                if probe.edgeId[1] in master.lookup[self.edgeTuple[0]]
                and probe.donorBaseNodeId in master.lookup[self.edgeTuple[1]]
            ][0]
        self.probe = probe
        iFrame = Frame(self)
        c = Canvas(iFrame, width=35, height=35)
        c.pack()

        #Success Icon
        img = openImage(
            maskgen.tool_set.get_icon('RedX.png') if probe.failure
            else maskgen.tool_set.get_icon('check.png'))
        self.successIcon = ImageTk.PhotoImage(
            imageResizeRelative(img, (30, 30), img.size).toPIL())
        c.create_image(15,
                       15,
                       image=self.successIcon,
                       anchor=CENTER,
                       tag='things')

        #Layout
        row = 0
        col = 0
        self.optionsLabel = Label(self, text=self.link, font=(None, 10))
        self.optionsLabel.grid(row=row,
                               columnspan=3,
                               sticky='EW',
                               padx=(40, 0),
                               pady=10)
        iFrame.grid(column=0, row=0, columnspan=1, sticky=W)
        row += 1
        self.operationVar = StringVar(value="Operation [ Semantic Groups ]:")
        self.operationLabel = Label(self,
                                    textvariable=self.operationVar,
                                    justify=LEFT)
        self.semanticFrame = SemanticFrame(self)
        self.semanticFrame.grid(row=row + 1,
                                column=0,
                                columnspan=2,
                                sticky=N + W,
                                rowspan=1,
                                pady=10)
        row += 2
        #cImageFrame is used for plot, image and overlay
        self.cImgFrame = ttk.Notebook(self)
        self.cImgFrame.bind('<<NotebookTabChanged>>',
                            lambda a: self.frameMove())
        self.cImgFrame.grid(row=row, rowspan=8)
        self.descriptionVar = StringVar()
        self.descriptionLabel = Label(self,
                                      textvariable=self.operationVar,
                                      justify=LEFT)
        row += 8
        self.operationLabel.grid(row=row, columnspan=3, sticky='W', padx=10)
        row += 1
        textscroll = Scrollbar(self)
        textscroll.grid(row=row, column=col + 1, sticky=NS)
        self.commentBox = Text(self,
                               height=5,
                               width=80,
                               yscrollcommand=textscroll.set,
                               relief=SUNKEN)
        self.master.commentsBoxes[self.link] = self.commentBox
        self.commentBox.grid(row=row,
                             column=col,
                             padx=5,
                             pady=5,
                             columnspan=1,
                             rowspan=2,
                             sticky=NSEW)
        textscroll.config(command=self.commentBox.yview)
        col = 3
        row = 0
        scroll = Scrollbar(self)
        scroll.grid(row=row,
                    column=col + 2,
                    rowspan=5,
                    columnspan=1,
                    sticky=NS)

        self.pathList = Listbox(self,
                                width=30,
                                yscrollcommand=scroll.set,
                                selectmode=EXTENDED,
                                exportselection=0)
        self.pathList.grid(row=row,
                           column=col - 1,
                           rowspan=5,
                           columnspan=3,
                           padx=(30, 10),
                           pady=(20, 20))
        self.master.pathboxes[self] = self.semanticFrame.getListbox()
        scroll.config(command=self.pathList.yview)
        self.transitionVar = StringVar()

        edge = master.scModel.getGraph().get_edge(probe.edgeId[0],
                                                  probe.edgeId[1])
        self.operationVar.set(self.operationVar.get() +
                              master._compose_label(edge))
        master.edges[self] = [edge, self.semanticFrame.getListbox()]
        for sg in edge['semanticGroups'] if 'semanticGroups' in edge else []:
            self.semanticFrame.insertListbox(ANCHOR, sg)
        operation = master.scModel.getGroupOperationLoader(
        ).getOperationWithGroups(edge['op'])

        #QA checkboxes
        if operation.qaList is not None:
            args = getValue(edge, 'arguments', {})
            self.curOpList = [x for x in operation.qaList]
            for item_pos in range(len(self.curOpList)):
                item = self.curOpList[item_pos]
                try:
                    self.curOpList[item_pos] = item.format(**args)
                except:
                    pass
        else:
            self.curOpList = []
        row += 5
        if self.curOpList is None:
            master.qaData.set_qalink_status(self.link, 'yes')

        for q in self.curOpList:
            box_label = Label(self, text=q, wraplength=250, justify=LEFT)
            ck = Chkbox(parent=self,
                        dialog=master,
                        label=box_label,
                        value=master.qaData.get_qalink_status(link=link))
            ck.box.grid(row=row, column=col - 1)
            ck.label.grid(row=row, column=col, columnspan=4, sticky='W')
            self.checkboxes.boxes.append(ck)
            row += 1
        master.checkboxes[self] = self.checkboxes

        # Main Features- load the overlay for images, load plot graph & overlay page for videos
        if ('<-' in self.link and probe.donorVideoSegments is None
            ) or probe.targetVideoSegments is None:
            self.load_overlay(initialize=True)
        else:
            self.transitionString(None)
            self.setUpFrames()

        #Comment section
        currentComment = master.qaData.get_qalink_caption(self.link)
        self.commentBox.delete(1.0, END)
        self.commentBox.insert(
            END, currentComment if currentComment is not None else '')

        #Navigation Buttons
        self.acceptButton = Button(self,
                                   text='Next',
                                   command=master.nex,
                                   width=15)
        self.acceptButton.grid(row=12,
                               column=col + 2,
                               columnspan=2,
                               sticky='E',
                               padx=(20, 20))
        self.prevButton = Button(self,
                                 text='Previous',
                                 command=master.pre,
                                 width=15)
        self.prevButton.grid(row=12,
                             column=col - 1,
                             columnspan=2,
                             sticky='W',
                             padx=(20, 20))

        self.acceptnButton = Button(self,
                                    text='Next Unchecked',
                                    command=master.nexCheck,
                                    width=15)
        self.acceptnButton.grid(row=13,
                                column=col + 2,
                                columnspan=2,
                                sticky='E',
                                padx=(20, 20))
        self.prevnButton = Button(self,
                                  text='Previous Unchecked',
                                  command=master.preCheck,
                                  width=15)
        self.prevnButton.grid(row=13,
                              column=col - 1,
                              columnspan=2,
                              sticky='W',
                              padx=(20, 20))
        row = 14
        #Progress Bar
        pb = ttk.Progressbar(self,
                             orient='horizontal',
                             mode='determinate',
                             maximum=100.0001)
        pb.grid(row=row, column=0, sticky=EW, columnspan=8)
        pb.step(master.progress * 100)

        master.progressBars.append(pb)
Example 28
def transform(im, source, target, **kwargs):
    target_wrapper = ImageWrapper(packImgBits(np.asarray(im),
                                              int(getValue(kwargs,'bits to use',11))))
    target_wrapper.save(target)
    return None, None
Example 29
def run_node_rules(graph, node, external=False, preferences=None):
    """
    :param graph: ImageGraph
    :param node:
    :param preferences:
    :return:
    @type preferences: MaskGenLoader
    @rtype: list of ValidationMessage
    @type graph: ImageGraph
    """
    import re
    import hashlib
    def rename(graph, start, end):
        node = graph.get_node(start)
        file = node['file']
        pattern = re.compile(r'[\|\'\"\(\)\,\$\? ]')
        new_name = re.sub(pattern, '_', file)
        os.rename(os.path.join(graph.dir, file),
                  os.path.join(graph.dir, new_name))
        node['file'] = new_name

    def remove_proxy(graph, start, end):
        node = graph.get_node(start)
        if 'proxyfile' in node:
            node.pop('proxyfile')

    errors = []
    nodeData = graph.get_node(node)
    multiplebaseok = graph.getDataItem('provenance',
                                       default_value='no') == 'yes'

    if 'file' not in nodeData:
        errors.append((Severity.ERROR, 'Missing file information.'))
    else:
        pattern = re.compile(r'[\|\'\"\(\)\,\$\?]')
        foundItems = pattern.findall(nodeData['file'])
        if foundItems:
            fix = rename if nodeData['nodetype'] == 'interim' else None
            errors.append(
                (Severity.ERROR,
                 "Invalid characters {}  used in file name {}.".format(
                     str(foundItems), nodeData['file']), fix))

    if nodeData['nodetype'] == 'final':
        fname = os.path.join(graph.dir, nodeData['file'])
        if os.path.exists(fname):
            with open(fname, 'rb') as rp:
                hashname = hashlib.md5(rp.read()).hexdigest()
                if hashname not in nodeData['file']:
                    errors.append(
                        (Severity.WARNING,
                         "Final image {} is not composed of its MD5.".format(
                             nodeData['file']), renameToMD5))
        proxy = getValue(nodeData, 'proxyfile', None)
        if proxy is not None:
            errors.append(
                (Severity.ERROR,
                 "Final media {} cannot be hidden by a proxy.".format(
                     nodeData['file']), remove_proxy))

    if nodeData['nodetype'] == 'base' and not multiplebaseok:
        for othernode in graph.get_nodes():
            othernodeData = graph.get_node(othernode)
            if node != othernode and othernodeData['nodetype'] == 'base':
                errors.append((Severity.ERROR,
                               "Projects should only have one base image"))

    if nodeData['nodetype'] in ('base', 'final', 'donor'):
        if 'file' not in nodeData:
            errors.append((Severity.ERROR, 'Missing media file'))
        else:
            file = nodeData['file']
            suffix_pos = file.rfind('.')
            if suffix_pos > 0:
                if file[suffix_pos:].lower() != file[suffix_pos:]:
                    errors.append(
                        (Severity.ERROR, nodeData['file'] + ' suffix (' +
                         file[suffix_pos:] + ') is not lower case'))
    return errors
Example 30
 def __get_cache_from_graph(self, source):
     node = self.graph.get_node(source)
     if node is not None:
         return getValue(node, 'media', {})
     return {}