Example #1
def register(fixed_volume_path, moving_volume_path, output_folder,
             params_path):
    output_image_path = r"{}\new_vol.hdf5".format(output_folder)

    moving_image = read_volume(moving_volume_path)
    fixed_image = read_volume(fixed_volume_path)

    # moving_image = normalize_ct_volume(moving_image)
    # fixed_image = normalize_ct_volume(fixed_image)

    # show_histogram(fixed_image, moving_image)

    # run_slicer_functionality({'moving_image': moving_image, 'fixed_image': fixed_image}, int(moving_image.shape[1] / 2))
    # exit()

    # rotated_moving_image = rotate_and_save(moving_image)
    # unrotated = moving_image.copy()
    moving_image = transform_moving_image(moving_image)

    moving_image, fixed_image = crop_volumes(moving_image, fixed_image)

    res_image_array = move_moving_image_to_fixed(moving_image, fixed_image,
                                                 params_path, output_folder)

    copy_and_add_volume(fixed_volume_path, output_image_path, res_image_array)

    all_slicer_images = OrderedDict([
        # ('moving', unrotated),
        ('moving_rotated', moving_image),
        ('moving_result', res_image_array),
        ('fixed', fixed_image)
    ])

    Slicer(all_slicer_images, int(fixed_image.shape[1] / 2)).show()
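A minimal invocation sketch for register(); the paths below are hypothetical placeholders, and the helper functions used above (read_volume, transform_moving_image, crop_volumes, move_moving_image_to_fixed) are assumed to be importable from the same module:

register(
    fixed_volume_path=r"C:\data\fixed_ct.hdf5",      # hypothetical input volume
    moving_volume_path=r"C:\data\moving_ct.hdf5",    # hypothetical input volume
    output_folder=r"C:\data\registration_out",       # new_vol.hdf5 is written here
    params_path=r"C:\data\registration_params.txt",  # hypothetical registration parameters
)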
Example #2
    def runSlicer(self):
        """ Launch an instance of Slicer with the current state of vars.

            Function that runs when the user presses the run button """
        # DEBUG:
        self.printSlicerVars()

        num_iters = self.num_slices_entry.get()
        if not num_iters:
            num_iters = self.num_imgs
        else:
            num_iters = int(num_iters)

        slicer = Slicer(
            in_dir = self.curr_dir_lbl.get(),
            out_dir = self.out_dir_lbl.get(),
            img_ext = self.img_ext,
            mode = self.mode.get(),
            reverse = self.reverse.get(),
            curve_depth = self.curve_depth,
            num_slices = num_iters
        )
        self.progress["value"] = 0
        self.progress["maximum"] = num_iters

        slice_thread = threading.Thread(target=slicer.slice)
        slice_thread.daemon = True
        slice_thread.start()

        self.slicer_running = True

        prog_thread = threading.Thread(target=self.watchProgress, args=(slicer, num_iters))
        prog_thread.daemon = True
        prog_thread.start()
Example #3
    def create_csurf_map(self, map_file):
        # Build a path -> build-name map from a tab-separated file.
        with open(map_file, 'r') as file_reader:
            lines = file_reader.readlines()
        csurf_map = defaultdict()
        for line in lines:
            path, _, build_name = line.partition('\t')
            csurf_map[path] = build_name.split('\n')[0]
        self.csurf_map = csurf_map
        self.slicer = Slicer()
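The map file is parsed as tab-separated path<TAB>build_name lines; a stand-alone sketch of the same parsing, using in-memory lines instead of a file (the paths and build names are made up):

from collections import defaultdict

lines = ["/src/app/main.c\tapp_build_1\n", "/src/lib/util.c\tlib_build_2\n"]
csurf_map = defaultdict()
for line in lines:
    path, _, build_name = line.partition('\t')
    csurf_map[path] = build_name.split('\n')[0]   # strip the trailing newline
print(dict(csurf_map))
# {'/src/app/main.c': 'app_build_1', '/src/lib/util.c': 'lib_build_2'}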
Example #4
    def __init__(
        self,
        values,
        base_values = None,
        data = None,
        display_data = None,
        instance_names = None,
        feature_names = None,
        output_names = None,
        output_indexes = None,
        lower_bounds = None,
        upper_bounds = None,
        main_effects = None,
        hierarchical_values = None,
        clustering = None
    ):
        self.op_history = []

        # cloning. TODO: better cloning :)
        if issubclass(type(values), Explanation):
            e = values
            values = e.values
            base_values = e.base_values
            data = e.data
            
        output_dims = compute_output_dims(values, base_values, data)

        if len(_compute_shape(feature_names)) == 1: # TODO: should always be an alias once slicer supports per-row aliases
            values_shape = _compute_shape(values)
            if len(values_shape) >= 1 and len(feature_names) == values_shape[0]:
                feature_names = Alias(list(feature_names), 0)
            elif len(values_shape) >= 2 and len(feature_names) == values_shape[1]:
                feature_names = Alias(list(feature_names), 1)
        
        if len(_compute_shape(output_names)) == 1: # TODO: should always be an alias once slicer supports per-row aliases
            values_shape = _compute_shape(values)
            if len(values_shape) >= 1 and len(output_names) == values_shape[0]:
                output_names = Alias(list(output_names), 0)
            elif len(values_shape) >= 2 and len(output_names) == values_shape[1]:
                output_names = Alias(list(output_names), 1)
                
        self._s = Slicer(
            values = values,
            base_values = None if base_values is None else Obj(base_values, [0] + list(output_dims)),
            data = data,
            display_data = display_data,
            instance_names = None if instance_names is None else Alias(instance_names, 0),
            feature_names = feature_names, 
            output_names =  output_names, # None if output_names is None else Alias(output_names, output_dims),
            output_indexes = None if output_indexes is None else (output_dims, output_indexes),
            lower_bounds = lower_bounds,
            upper_bounds = upper_bounds,
            main_effects = main_effects,
            hierarchical_values = hierarchical_values,
            clustering = None if clustering is None else Obj(clustering, [0])
        )
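A hedged usage sketch for the constructor above, assuming it is shap's Explanation initializer; the arrays are made up for illustration:

import numpy as np
from shap import Explanation  # assumption: the __init__ above belongs to shap's Explanation

# Hypothetical attribution values for 3 samples and 2 features.
values = np.array([[0.10, -0.25], [0.05, 0.40], [-0.30, 0.15]])
explanation = Explanation(
    values,
    base_values=np.zeros(3),                              # one baseline per sample
    data=np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),  # the raw feature values
    feature_names=["feature_a", "feature_b"],
)
# Internally everything is wrapped in a Slicer so that indexing keeps all arrays aligned.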
Example #5
    def __init__(self,
                 expected_value,
                 values,
                 data=None,
                 output_shape=tuple(),
                 interaction_order=0,
                 instance_names=None,
                 input_names=None,
                 output_names=None,
                 output_indexes=None,
                 feature_types=None,
                 lower_bounds=None,
                 upper_bounds=None,
                 main_effects=None,
                 hierarchical_values=None,
                 partition_tree=None):

        input_shape = _compute_shape(data)
        values_dims = list(
            range(len(input_shape) + interaction_order + len(output_shape)))
        output_dims = range(
            len(input_shape) + interaction_order, values_dims[-1])

        #main_effects_inds = values_dims[0:len(input_shape)] + values_dims[len(input_shape) + interaction_order:]
        self.output_names = output_names  # TODO: needs to be tracked after slicing still

        kwargs_dict = {}
        if lower_bounds is not None:
            kwargs_dict["lower_bounds"] = (values_dims, Slicer(lower_bounds))
        if upper_bounds is not None:
            kwargs_dict["upper_bounds"] = (values_dims, Slicer(upper_bounds))
        if main_effects is not None:
            kwargs_dict["main_effects"] = (values_dims, Slicer(main_effects))
        if output_indexes is not None:
            kwargs_dict["output_indexes"] = (output_dims,
                                             Slicer(output_indexes))
        if output_names is not None:
            kwargs_dict["output_names"] = (output_dims, Slicer(output_names))
        if hierarchical_values is not None:
            kwargs_dict["hierarchical_values"] = (hierarchical_values,
                                                  Slicer(hierarchical_values))
        if partition_tree is not None:
            kwargs_dict["partition_tree"] = (partition_tree,
                                             Slicer(partition_tree))

        super().__init__(data, values, input_shape, output_shape,
                         expected_value, interaction_order, instance_names,
                         input_names, feature_types, **kwargs_dict)
Example #6
    def slice(self):
        print "Taints: %s" % scanf_taint.taints
        taints = []
        for _, v in self._hooks.iteritems():
            taints.extend(v.taints)

        target_tmps, target_regs, target_addrs = self._slice_from_last_condition()

        try:
            slicer = Slicer(self._project, self._path, \
                            target_tmps, target_regs, target_addrs, \
                            self._mem_reads, self._mem_writes, taints)
            slicer.slice()
        except SlicerError:
            raise TracerError("Slicer failed")

        sources = self.insts_to_source(sorted(slicer.instructions))
        for line in sources:
            print line
Example #7
    def __init__(self, size=(3, 3)):
        """
        placeholder
        """

        # list of train and test directories
        self._annotation_suffix = '_Annotated_Cars.png'

        # 15cm resolution
        self._GSD = 0.15
        self._size = (int(round(
            (size[0] / self._GSD) / 2)), int(round((size[1] / self._GSD) / 2)))

        # xml conversion tweak
        self._custom_item_func = lambda x: 'object'

        # create image slicer
        self._slicer = Slicer()

        return
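For reference, the size computation above converts the requested window (presumably in metres) to a half-size in pixels at the 15 cm ground sample distance; a quick check of that arithmetic:

GSD = 0.15     # metres per pixel (15 cm resolution)
size = (3, 3)  # requested window, presumably in metres
half_size = (int(round((size[0] / GSD) / 2)), int(round((size[1] / GSD) / 2)))
print(half_size)  # (10, 10): a 3 m window spans 20 pixels, half of which is 10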
Example #8
    def __init__(
        self,
        values,
        base_values=None,
        data=None,
        display_data=None,
        instance_names=None,
        feature_names=None,
        output_names=None,
        output_indexes=None,
        lower_bounds=None,
        upper_bounds=None,
        main_effects=None,
        hierarchical_values=None,
        clustering=None,
        interactions=None,
        feature_groups=None,
    ):
        self.op_history = []

        # cloning. TODO: better cloning :)
        if issubclass(type(values), Explanation):
            e = values
            values = e.values
            base_values = e.base_values
            data = e.data

        output_dims = compute_output_dims(values, base_values, data)

        if len(
                _compute_shape(feature_names)
        ) == 1:  # TODO: should always be an alias once slicer supports per-row aliases
            values_shape = _compute_shape(values)
            if len(values_shape) >= 1 and len(
                    feature_names) == values_shape[0]:
                feature_names = Alias(list(feature_names), 0)
            elif len(values_shape) >= 2 and len(
                    feature_names) == values_shape[1]:
                feature_names = Alias(list(feature_names), 1)

        if len(
                _compute_shape(output_names)
        ) == 1:  # TODO: should always be an alias once slicer supports per-row aliases
            values_shape = _compute_shape(values)
            output_names = Alias(list(output_names), output_dims[0])
            # if len(values_shape) >= 1 and len(output_names) == values_shape[0]:
            #     output_names = Alias(list(output_names), 0)
            # elif len(values_shape) >= 2 and len(output_names) == values_shape[1]:
            #     output_names = Alias(list(output_names), 1)

        if output_names is not None and not isinstance(output_names, Alias):
            l = len(_compute_shape(output_names))
            if l == 0:
                pass
            elif l == 1:
                output_names = Obj(output_names, output_dims)
            elif l == 2:
                output_names = Obj(output_names, [0] + list(output_dims))
            else:
                raise ValueError(
                    "shap.Explanation does not yet support output_names of order greater than 3!"
                )

        self._s = Slicer(
            values=values,
            base_values=None if base_values is None else Obj(
                base_values, [0] + list(output_dims)),
            data=data,
            display_data=display_data,
            instance_names=None if instance_names is None else Alias(
                instance_names, 0),
            feature_names=feature_names,
            output_names=output_names,
            output_indexes=None if output_indexes is None else
            (output_dims, output_indexes),
            lower_bounds=lower_bounds,
            upper_bounds=upper_bounds,
            main_effects=main_effects,
            hierarchical_values=hierarchical_values,
            clustering=None if clustering is None else Obj(clustering, [0]),
            interactions=interactions,
            feature_groups=feature_groups)
Example #9
def Compute3DDice(PID: Union[int, List[int]],
                  netparams: str,
                  patchsize: int,
                  batch: int = 10,
                  bydim: int = 1,
                  doeval: bool = True,
                  dev: str = 'cpu',
                  step: int = 0,
                  saveout: bool = False,
                  savename: str = 'x') -> List[float]:
    # NB: in the deepmed case, patchsize means the size of the output patch
    # (i.e. if patchsize=9, the network input will be 25x25); this is handled below.
    # step = stride between patch starts. If 0, non-overlapping patches are taken;
    # if K, each patch starts at prev_patch_start + K.
    # saveout = whether to save the full subject output (for viewing and debugging).

    # GET NET:
    net, in1, in2, in3D = getNetwork(netparams, dev)
    if doeval:
        net.eval()
    else:
        net.train()
    device = torch.device(dev)
    print('Net loaded.')
    # CUT AND EVAL: loop through cutting smaller pieces, moving to torch and eval
    if isinstance(PID, int):
        PID = [PID]
    segmented = torch.zeros((1, 7), device=dev)
    existing = torch.zeros((1, 7), device=dev)
    intersec = torch.zeros(
        (1, 7), device=dev
    )  # these three accumulate per-class sums for the final Dice computation
    Dices = torch.zeros((len(PID), 7), device=dev)
    axes = [0, 2, 3] + ([4] if in3D else [])

    #set the right function to use
    TensorCropping = CenterCropTensor3d
    padding = [(0, 0), (0, patchsize), (0, patchsize),
               (0, patchsize)]  #(16,patchsize+16)
    paddingall = [(0, 0), (0, patchsize), (0, patchsize),
                  (0, patchsize)]  #(16,patchsize+16)

    if in2:  # deepmed: pad the input on all sides so patches can be cut as needed
        paddingall[1:] = [(16 + 8, patchsize + 16 + 8)] * 3
        patchsize = patchsize - 16
        # since patchsize, as passed to the slicer, means the size of the network output

    if not in3D:
        padding[bydim + 1] = (0, 0)
        paddingall[bydim + 1] = (0, 0)
        TensorCropping = CenterCropTensor

    # LOAD DATA:
    for idx, pid in enumerate(PID):
        #set accumulators to 0:
        segmented.zero_()
        existing.zero_()
        intersec.zero_()

        allin, gt, mask = loadSubject(pid, patchsize // 2)

        size_full = allin[0].shape  #shape of 3d img, one channel

        mask = np.pad(mask, padding[1:], mode='constant')
        gt = np.pad(gt, padding, mode='constant')
        allin = np.pad(allin, paddingall, mode='constant')
        #  print((size_full, gt.shape))
        empty_subj = torch.zeros(
            gt.shape[1:])  # allin.shape[1:]; the channel dimension is not needed here

        slicer = Slicer(size_full, patchsize, in1, in2, in3D, bydim,
                        step)  #return string slice, include all channels
        # for cutting out the middle part based on step:
        #slice((sf-step)//2, sf-np.ceil((sf-step)/2))
        slicing = "".join([
            f'.narrow({idx}, {(patchsize-step)//2}, {step})'
            for idx in range(2, (4 + in3D))
        ]) if step > 0 else ""
        paddingup = [0, patchsize - step] * 3
        if not in3D:
            paddingup[-1 - bydim * 2] = 0

        print(f'Eval on subj{pid}...')
        with torch.no_grad():
            while slicer.todo > 0:
                gtslices, in1slices, in2slices = slicer.get_batch(
                    batch)  #multiple slices

                gts = np.stack(list(
                    map(eval, [f'gt[{slajs}]' for slajs in gtslices])),
                               axis=0)
                in1s = np.stack(list(
                    map(eval, [f'allin[{slajs}]' for slajs in in1slices])),
                                axis=0)
                #maske = np.stack([eval(f'mask[{slajs[2:]}]') for slajs in gtslices], axis=0)
                maske = np.stack(list(
                    map(eval, [f'mask[{slajs[2:]}]' for slajs in gtslices])),
                                 axis=0)

                # move to torch:
                target_oh = torch.from_numpy(gts).squeeze().to(device)
                data = [torch.from_numpy(in1s).squeeze().float().to(device)
                        ]  #input 1
                if in2:
                    #in2s = np.stack([eval(f'allin[{slajs}]') for slajs in in2slices], axis=0)
                    in2s = np.stack(list(
                        map(eval, [f'allin[{slajs}]' for slajs in in2slices])),
                                    axis=0)
                    data.append(
                        torch.from_numpy(in2s).squeeze().float().to(
                            device))  #input 2

                #run net on data. get output, save sums in dice gather lists
                out = net(*data).exp()
                target_oh, out = TensorCropping(
                    target_oh, out
                )  # in the PSP-net case the output may be larger than the input/GT
                #dices = AllDices(out, target_oh)
                maske = torch.from_numpy(maske).squeeze().unsqueeze(
                    1).float().to(device)

                #cut only the middle part of OUT, MASKE and TARGET_OH for eval (depending on the step size)
                maske = eval('maske' + slicing)
                target_oh = eval('target_oh' + slicing)
                out = eval('out' + slicing)

                #when summing up, use only the middle of the patches. Depending on how big 'step' was.
                segmented += torch.sum(out * maske, axis=axes)
                existing += torch.sum(target_oh * maske, axis=axes)
                intersec += torch.sum(target_oh * maske * out, axis=axes)

                #save output if required
                if saveout:  # what's faster: saving into a preallocated tensor, or branching every loop?
                    for idd, slajs in enumerate(gtslices):
                        tmp = torch.argmax(out[idd, ...], dim=0)
                        if not in3D:
                            tmp = tmp.unsqueeze(bydim)
                    # print(tmp.shape)
                        tmp = torch.nn.functional.pad(tmp, paddingup)
                        eval(f'empty_subj[{slajs[2:]}].copy_(tmp)')

        #all saved, now calc actual dices:
        Dices[idx, :] = 2 * intersec / (existing + segmented
                                        )  #calc dice from the gathering lists
        if saveout:
            #save img as npy.
            np.save(f'out{pid}_{savename}.npy', empty_subj.cpu().numpy())

    print('Done.')
    #pidis = [int(p) for p in PIDS]
    dices = np.concatenate((np.array(PID)[:, None], Dices.cpu().numpy()),
                           axis=1)
    np.save(f'dices_{savename}.npy', dices)
    return dices
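The slicing throughout Compute3DDice is driven by strings passed to eval; a small stand-alone illustration of that pattern (the slice string here is made up, since the exact format returned by Slicer.get_batch is not shown):

import numpy as np

vol = np.arange(2 * 4 * 4).reshape(2, 4, 4)    # toy (channel, H, W) volume
slajs = ":, 1:3, 1:3"                          # hypothetical slice string
patch = eval(f"vol[{slajs}]")                  # equivalent to vol[:, 1:3, 1:3]
print(patch.shape)                             # (2, 2, 2)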
Example #10
    def __init__(self,
                 expected_value,
                 values,
                 data=None,
                 output_shape=tuple(),
                 interaction_order=0,
                 instance_names=None,
                 input_names=None,
                 output_names=None,
                 output_indexes=None,
                 feature_types=None,
                 lower_bounds=None,
                 upper_bounds=None,
                 main_effects=None,
                 hierarchical_values=None,
                 original_rows=None,
                 clustering=None):
        self.transform_history = []

        input_shape = _compute_shape(data)

        # trim any trailing None shapes since we don't want slicer to try and use those
        if len(input_shape) > 0 and input_shape[-1] is None:
            input_shape = input_shape[:-1]

        values_dims = list(
            range(len(input_shape) + interaction_order + len(output_shape)))
        output_dims = range(
            len(input_shape) + interaction_order, values_dims[-1])

        #main_effects_inds = values_dims[0:len(input_shape)] + values_dims[len(input_shape) + interaction_order:]
        self.output_names = output_names  # TODO: needs to be tracked after slicing still

        kwargs_dict = {}
        if lower_bounds is not None:
            kwargs_dict["lower_bounds"] = (values_dims, Slicer(lower_bounds))
        if upper_bounds is not None:
            kwargs_dict["upper_bounds"] = (values_dims, Slicer(upper_bounds))
        if main_effects is not None:
            kwargs_dict["main_effects"] = (values_dims, Slicer(main_effects))
        if output_indexes is not None:
            kwargs_dict["output_indexes"] = (output_dims,
                                             Slicer(output_indexes))
        if output_names is not None:
            kwargs_dict["output_names"] = (output_dims, Slicer(output_names))
        if hierarchical_values is not None:
            kwargs_dict["hierarchical_values"] = (values_dims,
                                                  Slicer(hierarchical_values))
        if input_names is not None:
            if not is_1d(input_names):
                input_name_dims = values_dims
            else:
                input_name_dims = values_dims[1:]
            kwargs_dict["input_names"] = (input_name_dims, Slicer(input_names))
        if original_rows is not None:
            kwargs_dict["original_rows"] = (values_dims[1:],
                                            Slicer(original_rows))
        if clustering is not None:
            kwargs_dict["clustering"] = ([0], Slicer(clustering))
        if expected_value is not None:
            ndims = len(getattr(expected_value, "shape", []))
            if ndims == len(values_dims):
                kwargs_dict["expected_value"] = (values_dims,
                                                 Slicer(expected_value))
            elif ndims == len(values_dims) - 1:
                kwargs_dict["expected_value"] = (values_dims[1:],
                                                 Slicer(expected_value))
            else:
                raise Exception(
                    "The shape of the passed expected_value does not match the shape of the passed values!"
                )
        # if clustering is not None:
        #     self.clustering = clustering

        super().__init__(data, values, input_shape, output_shape,
                         expected_value, interaction_order, instance_names,
                         input_names, feature_types, **kwargs_dict)
Example #11
    def __init__(  # pylint: disable=too-many-arguments
            self,
            values,
            base_values=None,
            data=None,
            display_data=None,
            instance_names=None,
            feature_names=None,
            output_names=None,
            output_indexes=None,
            lower_bounds=None,
            upper_bounds=None,
            error_std=None,
            main_effects=None,
            hierarchical_values=None,
            clustering=None,
            compute_time=None):
        self.op_history = []

        self.compute_time = compute_time

        # cloning. TODOsomeday: better cloning :)
        if issubclass(type(values), Explanation):
            e = values
            values = e.values
            base_values = e.base_values
            data = e.data

        self.output_dims = compute_output_dims(values, base_values, data,
                                               output_names)
        values_shape = _compute_shape(values)

        if output_names is None and len(self.output_dims) == 1:
            output_names = [
                f"Output {i}" for i in range(values_shape[self.output_dims[0]])
            ]

        if len(
                _compute_shape(feature_names)
        ) == 1:  # TODOsomeday: should always be an alias once slicer supports per-row aliases
            if len(values_shape) >= 1 and len(
                    feature_names) == values_shape[0]:
                feature_names = Alias(list(feature_names), 0)
            elif len(values_shape) >= 2 and len(
                    feature_names) == values_shape[1]:
                feature_names = Alias(list(feature_names), 1)

        if len(
                _compute_shape(output_names)
        ) == 1:  # TODOsomeday: should always be an alias once slicer supports per-row aliases
            output_names = Alias(list(output_names), self.output_dims[0])
            # if len(values_shape) >= 1 and len(output_names) == values_shape[0]:
            #     output_names = Alias(list(output_names), 0)
            # elif len(values_shape) >= 2 and len(output_names) == values_shape[1]:
            #     output_names = Alias(list(output_names), 1)

        if output_names is not None and not isinstance(output_names, Alias):
            l = len(_compute_shape(output_names))
            if l == 0:
                pass
            elif l == 1:
                output_names = Obj(output_names, self.output_dims)
            elif l == 2:
                output_names = Obj(output_names, [0] + list(self.output_dims))
            else:
                raise ValueError(
                    "shap.Explanation does not yet support output_names of order greater than 3!"
                )

        if not hasattr(base_values, "__len__") or len(base_values) == 0:
            pass
        elif len(_compute_shape(base_values)) == len(self.output_dims):
            base_values = Obj(base_values, list(self.output_dims))
        else:
            base_values = Obj(base_values, [0] + list(self.output_dims))

        self._s = Slicer(
            values=values,
            base_values=base_values,
            data=list_wrap(data),
            display_data=list_wrap(display_data),
            instance_names=None if instance_names is None else Alias(
                instance_names, 0),
            feature_names=feature_names,
            output_names=output_names,
            output_indexes=None if output_indexes is None else
            (self.output_dims, output_indexes),
            lower_bounds=list_wrap(lower_bounds),
            upper_bounds=list_wrap(upper_bounds),
            error_std=list_wrap(error_std),
            main_effects=list_wrap(main_effects),
            hierarchical_values=list_wrap(hierarchical_values),
            clustering=None if clustering is None else Obj(clustering, [0]))
Example #12
from slicer import Slicer
import time
import datetime

dir_name = "C:\\Users\\absch\\Desktop\\slicer-test-large\\"
img_ext = ".jpg"

test_timestamp = datetime.datetime.fromtimestamp(
    time.time()).strftime('%Y-%m-%d %H:%M:%S')
f = open("timelog.txt", "a")

begin = time.perf_counter()
slicer = Slicer(dir_name, img_ext, "simple", False)
slicer.slice()
f.write(test_timestamp + "\t\tSimple\t\t" +
        str(round(time.perf_counter() - begin, 4)) + "\n")

begin = time.perf_counter()
convex_slicer = Slicer(dir_name, img_ext, "convex", False, 10)
convex_slicer.slice()
f.write(test_timestamp + "\t\tConvex\t\t" +
        str(round(time.perf_counter() - begin, 4)) + "\n")

begin = time.perf_counter()
concave_slicer = Slicer(dir_name, img_ext, "concave", False, 10)
concave_slicer.slice()
f.write(test_timestamp + "\t\tConcave\t\t" +
        str(round(time.perf_counter() - begin, 4)) + "\n")

f.close()
Example #13
    #==============================================================================
    #     Process Raw Data
    #==============================================================================
    if args.intype[0] == 'raw':
        if args.interpolate:
            process_series_files.process_all_in_dir(args.indir[0],
                                                    join(out_dir, 'data'))
            data_dir = join(out_dir, 'data')
        """
        else: #just copy the files
            print "Copying data files to ", data_dir
            for csvf in glob.iglob(join(args.indir[0],"*.csv")):
                shutil.copyfile(csvf, join(data_dir, os.path.basename(csvf)))
        """
        print "Instantiating Slicer and loading series"
        slicer = Slicer(taskfile=join(data_dir, 'task.xls'))
        filelist=[join(data_dir,f) for f in os.listdir(data_dir) if \
            re.compile(".*\.csv").match(f)]
        num_subjects = len(filelist)
        slicer.load_series_from_csv('raw', filelist)

        if args.stats:
            pp = PdfPages(join(report_dir, 'stats.pdf'))
            stats.plot_all(slicer, pp)

            fig, ax = plt.subplots()
            ax.plot(range(1, num_subjects + 1))
            plt.title("Number of subjects")
            pp.savefig(fig)
            pp.close()
Example #14
from slicer import Slicer

#dir_name = "C:\\Users\\absch\\Desktop\\Slicer-test\\"
dir_name = "/mnt/c/Users/absch/Desktop/Slicer-test/"
img_ext = ".jpg"

slicer = Slicer(dir_name, img_ext, "simple", reverse=False, num_slices=20)
slicer.slice()

# convex_slicer = Slicer(dir_name, img_ext, "convex", False, 10)
# convex_slicer.slice()
#
# concave_slicer = Slicer(dir_name, img_ext, "concave", False, 10)
# concave_slicer.slice()
Example #15
        row = []
        for _ in range(0, len(template[row_index])):
            row.append(" ")
        picture.append(row)
    return picture


def add_piece(picture, slices):
    print_picture(picture)
    for i in slices:
        for j in i:
            picture[j[2]][j[1]] = j[0]
            print_picture(picture)


def print_picture(picture):
    os.system('cls' if os.name == 'nt' else 'clear')
    for i in range(0, len(picture)):
        output = ""
        for j in range(0, len(picture[i])):
            output += picture[i][j]
        print(output)


if __name__ == "__main__":
    lib = Library(input("Type your special character: "))
    template = lib.assemble_line(input("Type your phrase: "))
    slicer = Slicer(template)
    slices = slicer.get_pattern()
    picture = get_picture(template)
    add_piece(picture, slices)
Example #16
        slicer.extract_rolling_median(seriesname='raw', window_size=ws)
        rm = slicer.series['raw_rolling_median_' + str(ws)][start:end]
        rm_x = [
            int(j.microseconds / 1000)
            for j in [i - rm.index[0] for i in rm.index]
        ]
        rm_y = [i for i in rm]
        #rm.plot(xticks=rm.index)
        plt.plot(rm_x, rm_y)

    plt.legend(['512Hz EEG'] + ['Window size: %d' % ws for ws in window_sizes],
               loc='best')
    plt.ylabel(r"Potential ($\mu$V)")
    plt.xlabel(r"Time after stimulus (ms)")
    plt.grid()
    #plt.title('10 Hz rolling median, compared to 512Hz signal')
    ax.set_ylim(ax.get_ylim()[::-1])

    pdfpages.savefig()
    #plt.show() #debug


if __name__ == "__main__":
    slicer = Slicer()
    print('loading raw from list of csvfiles')
    slicer.load_series_from_csv('raw', sys.argv[1:])
    pp = PdfPages('rolling_median.pdf')
    do_charts(slicer, pp)
    pp.close()