Example #1
0
    def make_file(self, binary=None):
        """Return readable and writable temporary file.

        Arguments:
            binary - unused; retained only for backward compatibility
        """
        if 'wsgi.file_callback' in self.environ:
            return self.environ['wsgi.file_callback'](self.filename)
        else:
            return CgiFieldStorage.make_file(self)
Example #2
0
    def make_file(self, binary=None):
        """Return readable and writable temporary file.

        Arguments:
            binary - unused; retained only for backward compatibility
        """
        if 'wsgi.file_callback' in self.environ:
            return self.environ['wsgi.file_callback'](self.filename)
        else:
            return CgiFieldStorage.make_file(self)
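
The 'wsgi.file_callback' hook checked above is supplied through the WSGI environ: a middleware places a callable there, and make_file() hands it the upload's filename in exchange for a writable file object, so uploads can be streamed to a location the application chooses instead of an anonymous temporary file. Below is a minimal illustrative sketch of that contract; the names file_callback and install_file_callback are hypothetical and not taken from Galaxy.

import os
import tempfile


def file_callback(filename):
    # Hypothetical callback: return a writable binary file object for the
    # upload named `filename`, stored in a dedicated directory rather than
    # the anonymous temporary file FieldStorage would create by default.
    upload_dir = tempfile.mkdtemp(prefix='uploads-')
    safe_name = os.path.basename(filename or 'upload')
    return open(os.path.join(upload_dir, safe_name), 'w+b')


def install_file_callback(app):
    # Hypothetical WSGI middleware: expose the callback to downstream code,
    # such as the make_file() override above, via the environ.
    def wrapped(environ, start_response):
        environ['wsgi.file_callback'] = file_callback
        return app(environ, start_response)
    return wrapped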
Example #3
0
    def _upload_test_data(self):
        """
        Upload test datasets, which are defined in the <test></test>
        section of the provided tool.
        """
        if not self._tool.tests:
            raise ValueError('Tests are not defined.')

        self._test = self._tool.tests[0]

        # All inputs with the type 'data'
        self._data_inputs = {
            x.name: x
            for x in self._tool.input_params if x.type == 'data'
        }

        # Datasets from the <test></test> section
        test_datasets = {
            input_name: self._test.inputs[input_name][0]
            for input_name in self._data_inputs.keys()
            if input_name in self._test.inputs.keys()
        }

        # Conditional datasets
        for name, value in self._test.inputs.items():
            if '|' in name:
                input_name = name.split('|')[1]
                if input_name in self._data_inputs.keys():
                    test_datasets.update(
                        {input_name: self._test.inputs[name][0]})

        if not test_datasets.keys():
            not_supported_input_types = [
                k for k, v in self._tool.inputs.items()
                if v.type == 'repeat' or v.type == 'data_collection'
            ]
            if not_supported_input_types:
                raise ValueError('Unsupported input types.')
            else:
                # Some tests don't have data inputs at all,
                # so we can generate a tour without them
                self._use_datasets = False
                return

        test_data_paths = [os.path.abspath('test-data')]
        test_data_cache_dir = os.path.abspath(
            os.environ.get('GALAXY_TEST_DATA_REPO_CACHE', 'test-data-cache'))
        test_data_paths.extend(
            [x[0] for x in os.walk(test_data_cache_dir) if '.git' not in x[0]])
        if self._tool.tool_shed:
            test_data_paths.append(
                os.path.abspath(os.path.join(self._tool.tool_dir,
                                             'test-data')))

        # Upload all test datasets
        for input_name, input in self._data_inputs.items():
            if input_name in test_datasets.keys():
                for i, data_path in enumerate(test_data_paths):
                    input_path = os.path.join(data_path,
                                              test_datasets[input_name])
                    if os.path.exists(input_path):
                        break
                    elif i + 1 == len(test_data_paths):  # the last path
                        raise ValueError('Test dataset "%s" doesn\'t exist.' %
                                         input_name)

                upload_tool = self._trans.app.toolbox.get_tool('upload1')
                filename = os.path.basename(input_path)

                with open(input_path, 'r') as f:
                    content = f.read()
                    headers = {
                        'content-disposition':
                        'form-data; name="{}"; filename="{}"'.format(
                            'files_0|file_data', filename),
                    }

                    input_file = FieldStorage(headers=headers)
                    input_file.file = input_file.make_file()
                    input_file.file.write(content)

                    inputs = {
                        'dbkey': '?',  # is it always a question mark?
                        'file_type': input.extensions[0],
                        'files_0|type': 'upload_dataset',
                        'files_0|space_to_tab': None,
                        'files_0|to_posix_lines': 'Yes',
                        'files_0|file_data': input_file,
                    }

                    params = Params(inputs, sanitize=False)
                    incoming = params.__dict__
                    output = upload_tool.handle_input(self._trans,
                                                      incoming,
                                                      history=None)

                    job_errors = output.get('job_errors', [])
                    if job_errors:
                        # self._errors.extend(job_errors)
                        raise ValueError('Cannot upload a dataset.')
                    else:
                        self._hids.update(
                            {input_name: output['out_data'][0][1].hid})
Example #4
0
    def _upload_test_data(self):
        """
        Upload test datasets, which are defined in the <test></test>
        section of the provided tool.
        """
        if not self._tool.tests:
            raise ValueError('Tests are not defined.')

        self._test = self._tool.tests[0]

        # All inputs with the type 'data'
        self._data_inputs = {x.name: x for x in self._tool.input_params if
                             x.type == 'data'}

        # Datasets from the <test></test> section
        test_datasets = {
            input_name: self._test.inputs[input_name][0]
            for input_name in self._data_inputs.keys()
            if input_name in self._test.inputs.keys()
        }

        # Conditional datasets
        for name, value in self._test.inputs.items():
            if '|' in name:
                input_name = name.split('|')[1]
                if input_name in self._data_inputs.keys():
                    test_datasets.update({
                        input_name: self._test.inputs[name][0]
                    })

        if not test_datasets.keys():
            not_supported_input_types = [
                k for k, v in self._tool.inputs.items() if
                v.type == 'repeat' or v.type == 'data_collection'
            ]
            if not_supported_input_types:
                raise ValueError('Unsupported input types.')
            else:
                # Some tests don't have data inputs at all,
                # so we can generate a tour without them
                self._use_datasets = False
                return

        test_data_paths = [os.path.abspath('test-data')]
        test_data_cache_dir = os.path.abspath(
            os.environ.get('GALAXY_TEST_DATA_REPO_CACHE', 'test-data-cache'))
        test_data_paths.extend([
            x[0] for x in os.walk(test_data_cache_dir) if '.git' not in x[0]])
        if self._tool.tool_shed:
            test_data_paths.append(os.path.abspath(os.path.join(
                self._tool.tool_dir, 'test-data')))

        # Upload all test datasets
        for input_name, input in self._data_inputs.items():
            if input_name in test_datasets.keys():
                for i, data_path in enumerate(test_data_paths):
                    input_path = os.path.join(data_path,
                                              test_datasets[input_name])
                    if os.path.exists(input_path):
                        break
                    elif i + 1 == len(test_data_paths):  # the last path
                        raise ValueError('Test dataset "%s" doesn\'t exist.' %
                                         input_name)

                upload_tool = self._trans.app.toolbox.get_tool('upload1')
                filename = os.path.basename(input_path)

                with open(input_path, 'r') as f:
                    content = f.read()
                    headers = {
                        'content-disposition':
                            'form-data; name="{}"; filename="{}"'.format(
                                'files_0|file_data', filename
                            ),
                    }

                    input_file = FieldStorage(headers=headers)
                    input_file.file = input_file.make_file()
                    input_file.file.write(content)

                    inputs = {
                        'dbkey': '?',  # is it always a question mark?
                        'file_type': input.extensions[0],
                        'files_0|type': 'upload_dataset',
                        'files_0|space_to_tab': None,
                        'files_0|to_posix_lines': 'Yes',
                        'files_0|file_data': input_file,
                    }

                    params = Params(inputs, sanitize=False)
                    incoming = params.__dict__
                    output = upload_tool.handle_input(self._trans, incoming,
                                                      history=None)

                    job_errors = output.get('job_errors', [])
                    if job_errors:
                        # self._errors.extend(job_errors)
                        raise ValueError('Cannot upload a dataset.')
                    else:
                        self._hids.update({
                            input_name: output['out_data'][0][1].hid
                        })