Example #1
import os
from utils import DotDict, namedtuple_with_defaults, zip_namedtuple, config_as_dict

RandCropper = namedtuple_with_defaults(
    'RandCropper', 'min_crop_scales, max_crop_scales, \
    min_crop_aspect_ratios, max_crop_aspect_ratios, \
    min_crop_overlaps, max_crop_overlaps, \
    min_crop_sample_coverages, max_crop_sample_coverages, \
    min_crop_object_coverages, max_crop_object_coverages, \
    max_crop_trials', [0.0, 1.0, 0.5, 2.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 25])

RandPadder = namedtuple_with_defaults(
    'RandPadder', 'rand_pad_prob, max_pad_scale, fill_value', [0.0, 1.0, 127])

ColorJitter = namedtuple_with_defaults(
    'ColorJitter', 'random_hue_prob, max_random_hue, \
    random_saturation_prob, max_random_saturation, \
    random_illumination_prob, max_random_illumination, \
    random_contrast_prob, max_random_contrast',
    [0.0, 18, 0.0, 32, 0.0, 32, 0.0, 0.5])

cfg = DotDict()
cfg.ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))

# training configs
cfg.train = DotDict()
# random cropping samplers
cfg.train.rand_crop_samplers = [
    RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.1),
    RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.3),
    RandCropper(min_crop_scales=0.3, min_crop_overlaps=0.5),
]  # samplers list truncated in the source snippet
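
Every example on this page imports namedtuple_with_defaults from a project-local utils module whose definition is not shown. A minimal sketch, assuming the helper follows the common recipe of attaching default values to collections.namedtuple:

import collections

def namedtuple_with_defaults(typename, field_names, default_values=()):
    """Like collections.namedtuple, but fields omitted at call time
    fall back to default_values (a sequence or a dict)."""
    T = collections.namedtuple(typename, field_names)
    # Make every field optional first, then bake in the real defaults
    # by building a prototype instance and reusing its values.
    T.__new__.__defaults__ = (None,) * len(T._fields)
    if isinstance(default_values, dict):
        prototype = T(**default_values)
    else:
        prototype = T(*default_values)
    T.__new__.__defaults__ = tuple(prototype)
    return T

# Usage matching the definitions above:
pad = RandPadder()                    # all defaults: (0.0, 1.0, 127)
pad = RandPadder(rand_pad_prob=0.5)   # override a single field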

Example #2
def exportHistoryData(self, exportType):
    import flask
    import csv
    import StringIO  # Python 2 module; this method is Python 2 code
    import re
    from utils import namedtuple_with_defaults, prepare_dict, load_json, rename_duplicates

    history_dicts = self._getHistoryDicts()

    if history_dicts is not None:
        si = StringIO.StringIO()

        headers = [
            'File name', 'Timestamp', 'Success', 'Print time',
            'Filament length', 'Filament volume'
        ]
        fields = [
            'fileName', 'timestamp', 'success', 'printTime', 'filamentLength',
            'filamentVolume'
        ]
        if exportType == 'csv':
            writer = csv.writer(si, quoting=csv.QUOTE_ALL)
            writer.writerow(headers)

            for historyDetails in history_dicts:
                output = list()
                for field in fields:
                    value = historyDetails.get(field, '-')
                    output.append(value if value is not None else '-')
                writer.writerow(output)

            response = flask.make_response(si.getvalue())
            response.headers["Content-type"] = "text/csv"
            response.headers[
                "Content-Disposition"] = "attachment; filename=octoprint_print_history_export.csv"
        elif exportType == 'csv_extra':
            unused_fields = ["spool", "user", "note", "id", "parameters"]
            csv_header = set(fields)

            for historyDetails in history_dicts:
                parameters = load_json(historyDetails, "parameters")
                csv_header |= set(parameters.keys())
            # Normalize spaces to underscores (CamelCase keys are left as-is)
            csv_header = map(lambda x: x.replace(" ", "_"), csv_header)
            csv_header = rename_duplicates(fields, csv_header, prefix="g")
            rearranged_header = fields[:]
            for column in csv_header:
                if column not in rearranged_header:  # avoid duplicate columns
                    rearranged_header.append(column)
            csv_header = rearranged_header

            ParametersRow = namedtuple_with_defaults('TableRow', csv_header)
            writer = csv.writer(si, quoting=csv.QUOTE_ALL)
            writer.writerow(csv_header)
            for historyDetails in history_dicts:
                parameters = load_json(historyDetails, "parameters")
                historyDetails.update(parameters)
                for key in unused_fields:
                    if key in historyDetails:
                        historyDetails.pop(key)

                for key in [
                        "Plastic volume", "Plastic weight", "Filament length"
                ]:
                    if historyDetails.get(key, None):
                        historyDetails[key] = re.search(
                            r"[\d\.]*", historyDetails[key]).group(0)
                if historyDetails.get("Build time", None):
                    # "Build time" ("%d hours %d minutes") is converted to
                    # total seconds; parsing occasionally fails, so skip
                    # malformed values rather than abort the export.
                    try:
                        match = re.match(
                            r"(\d+) hours (\d+) minutes",
                            historyDetails.get("Build time", None))
                        historyDetails["Build time"] = (
                            int(match.group(1)) * 60 +
                            int(match.group(2))) * 60
                    except (AttributeError, ValueError):  # no match / bad int
                        pass
                parameters_row = ParametersRow(**prepare_dict(historyDetails))
                writer.writerow([
                    getattr(parameters_row, field)
                    for field in parameters_row._fields
                ])

            response = flask.make_response(si.getvalue())
            response.headers["Content-type"] = "text/csv"
            response.headers[
                "Content-Disposition"] = "attachment; filename=octoprint_print_history(extra)_export.csv"
        elif exportType == 'excel':
            import xlsxwriter

            workbook = xlsxwriter.Workbook(si)
            worksheet = workbook.add_worksheet()
            for column, header in enumerate(headers):
                worksheet.write(0, column, header)

            for row, historyDetails in enumerate(history_dicts):
                for column, field in enumerate(fields):
                    value = historyDetails.get(field, '-')
                    worksheet.write(row + 1, column,
                                    (value if value is not None else '-'))

            workbook.close()

            response = flask.make_response(si.getvalue())
            response.headers["Content-type"] = "application/vnd.ms-excel"
            response.headers[
                "Content-Disposition"] = "attachment; filename=octoprint_print_history_export.xlsx"

        return response
    else:
        return flask.make_response("No history file", 400)
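
In the csv_extra branch above, the defaults carry the logic: ParametersRow is created from the derived header with no explicit defaults, so any parameter a given print lacks falls back to None and every row stays aligned with the header. A tiny illustration using the sketch under Example #1 (field names hypothetical):

Row = namedtuple_with_defaults('Row', ['fileName', 'Build_time'])
row = Row(fileName='part.gcode')               # Build_time falls back to None
print([getattr(row, f) for f in row._fields])  # ['part.gcode', None]
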
Example #3
import argparse
import collections
import torch
import numpy as np
import data_loaders as module_data
import trainer as trainers_module
import model.loss as module_loss
import model.metric as module_metric
import model as module_arch
from parse_config import ConfigParser
from trainer import Trainer, ProbabilisticTrainer
from polyaxon_client.tracking import Experiment, get_data_paths, get_outputs_path
import utils as util

CustomArgs = util.namedtuple_with_defaults('CustomArgs',
                                           'flags type target action help',
                                           (None, ) * 5)


class BaseRunner:
    def __init__(self, description="Base Parser Description"):
        self.static_arguments = argparse.ArgumentParser(
            description=description)
        self.dynamic_arguments = []

    def add_static_arguments(self):
        """
            Arguments which are not related to the json file
            Where spicifc logic need to be performed for configuration
            purposes for example
        """
Example #4
from utils import namedtuple_with_defaults

from django.conf import settings

Junction = namedtuple_with_defaults(
    'Junction', ['ref', 'ref_count', 'contig', 'contig_count', 'annotation'],
    [None, -1, None, -1, []])


def get_jbrowse_link_for_contig_aligned_to_ref_genome(contig, loc):
    """Returns JBrowse to the contig aligned against the reference genome.
    """
    sample_alignment = contig.experiment_sample_to_alignment

    # HACK: Workaround to using type=Dataset.TYPE.BWA_ALIGN, because this
    # file is imported by main/models.py, so importing Dataset here causes a
    # circular import error.
    # TODO(gleb): Figure out better organization
    bam_dataset = sample_alignment.dataset_set.get(type='BWA BAM')

    sample_bam_track = '_'.join([
        bam_dataset.internal_string(sample_alignment.experiment_sample),
        str(sample_alignment.alignment_group.uid)
    ])

    track_labels = settings.JBROWSE_DEFAULT_TRACKS + [sample_bam_track]
    return (contig.parent_reference_genome.get_client_jbrowse_link() +
            '&loc=' + str(loc) + '&tracks=' + ','.join(track_labels))


def decorate_with_link_to_loc(contig, loc, text):
    # Body truncated in the source; a plausible completion wraps text in
    # an anchor pointing at the JBrowse link built above.
    return ('<a href="' +
            get_jbrowse_link_for_contig_aligned_to_ref_genome(contig, loc) +
            '">' + text + '</a>')
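
One detail worth noting in this example: Junction's annotation field defaults to a list. With a recipe like the sketch under Example #1, that single list object is shared by every Junction instance that leaves the field unset, so mutating it in place leaks across instances:

a = Junction()
b = Junction()
a.annotation.append('splice_site')
print(b.annotation)  # ['splice_site'] -- the default list is shared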