Example #1
    def ingest(self, receipt, classes, labels=None):
        mkdir_p(self.base_path)

        store_labels = labels is None
        labels = labels or []

        with open(self.feature_names_path, "a+") as feature_file, \
                open(self.labels_path, "a") as labels_file:
            feature_file.seek(0)
            feature_names = set(feature_file.read().split())

            for idx, name in enumerate(self.class_names):
                with open(self.feature_data_path(idx, name), "a") as outfile:
                    for position in classes[idx]:
                        features = self.extract_features(receipt, position)

                        print(json.dumps(features), file=outfile)

                        feature_names.update(features.keys())

                        if store_labels:
                            labels.append((position, idx))

            feature_file.seek(0)
            feature_file.truncate()
            feature_file.write("\n".join(sorted(feature_names)))

            if store_labels:
                print(json.dumps((receipt.path, labels)), file=labels_file)

        return True
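
All of the snippets in this listing assume the same small mkdir_p helper. A minimal sketch of what such a helper typically looks like (the exact signature varies by project; Example #43, for instance, adds abort and msg keyword arguments):

    import errno
    import os

    def mkdir_p(path):
        """Create a directory and any missing parents, like `mkdir -p` in the shell."""
        try:
            os.makedirs(path)
        except OSError as e:
            # Tolerate an already-existing directory; re-raise anything else.
            if e.errno != errno.EEXIST or not os.path.isdir(path):
                raise
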
Example #2
def train():
    logging.info('train bss model')
    sig1_list = util.load_data(config.DATA_TRAIN_LIST, config.DATA_TRAIN_ROOT)
    # sig1_list = util.load_data_from_matlab(config.DATA_TRAIN_LIST, config.DATA_TRAIN_ROOT)
    sig2 = util.read_wav(config.DATA_TRAIN_STATIC_SIGNAL)
    # sig2 = util.read_wav_from_matlab(config.DATA_TRAIN_STATIC_SIGNAL)
    # print('sig1_list is', sig1_list)
    # print('sig2 is', sig2)
    if config.NORMALIZATION:
        sig1_list = [_normalize(sig) for sig in sig1_list]
        sig2 = _normalize(sig2)

    logging.info('mix signals')
    util.mkdir_p(config.DATA_TRAIN_MIX_ROOT)
    mix_filenames = _gen_mix_filename(config.DATA_TRAIN_LIST,
                                      config.DATA_TRAIN_STATIC_SIGNAL,
                                      config.DATA_TRAIN_MIX_ROOT)
    sig2_list = [sig2 for _ in range(len(sig1_list))]
    sig1_list, sig2_list, mix_list = util.mix(sig1_list,
                                              sig2_list,
                                              output=mix_filenames)

    # print('sig1_list is', sig1_list)
    # print('sig2_list is', sig2_list)
    # print('mix_list is', mix_list)
    # logging.info('extract stft features')
    # sig1_stft = _extract_stft(sig1_list)
    # print('sig1_stft', sig1_stft)
    # sig2_mrcg = _extract_stft(sig2_list)
    # mix_mrcg = _extract_stft(mix_list)

    logging.info('extract mrcg features')
    sig1_mrcg = _extract_mrcg(sig1_list)
    print('sig1_mrcg is', sig1_mrcg)
    sig2_mrcg = _extract_mrcg(sig2_list)
    print('sig2_mrcg is', sig2_mrcg)
    mix_mrcg = _extract_mrcg(mix_list)
    print('mix_mrcg is', mix_mrcg)

    logging.info('train neural network')
    sig1_mrcg = util.r_abs(sig1_mrcg)
    sig2_mrcg = util.r_abs(sig2_mrcg)
    mix_mrcg = util.r_abs(mix_mrcg)
    train_x, train_y = mix_mrcg, []
    for s, n in zip(sig1_mrcg, sig2_mrcg):  # concatenate signal and noise as the NN output
        train_y.append(np.concatenate((s, n), axis=1))
    train_x = [_extend(sig, config.EXTEND_NUM) for sig in train_x]

    # number of neurons in each layer
    layer_size = [len(train_x[0][0])]  # input layer
    layer_size.extend(config.LAYER_SIZE)  # hidden layers
    layer_size.append(len(train_y[0][0]))  # output layer

    dnn = nn.NNet(layer_size)
    util.mkdir_p(config.MODEL_ROOT)
    dnn.train(train_x,
              train_y,
              model_path=config.MODEL_NN_PATH,
              training_epochs=config.EPOCH,
              learning_rate=config.LEARNING_RATE)
Example #3
    def install_base(self):
        """
        Install pathogen.vim and create vimpyre directory.

            >>> bat = Bat()
            >>> bat.install_base()
            => => Send a bat to catch pathogen.vim ...
            Catch done! Please add the following message to your .vimrc:
            call pathogen#runtime_append_all_bundles("vimpyre")
        """
        try:
            console('=> => Send a bat to catch pathogen.vim ...')
            raw_urlopen = urllib.urlopen(self.PATHOGEN_URL)
            if raw_urlopen.getcode() == 200:
                util.mkdir_p(self.AUTOLOAD_PATH)
                util.mkdir_p(self.VIMPYRE_PATH)
                raw_pathogen = raw_urlopen.read()
                pathogen = path.join(self.AUTOLOAD_PATH, 'pathogen.vim')
                with open(pathogen, 'w') as f:
                    f.write(raw_pathogen)
                console('Catch done! Please add the following to your .vimrc:')
                console('call pathogen#runtime_append_all_bundles("vimpyre")')
            else:
                console('Catch fail! Please try again!')
        except:
            console('[Unexpected Error] Catch fail! Please try again!')
Example #4
def main(*args, **kwargs):
    """
    """
    if not HAS_MATPLOTLIB:
        raise ImportError(
            "Please install matplotlib to generate a plot from this example.")

    ############################################
    # Step 1: Define parameters for the models #
    ############################################
    util.mkdir_p(FLAGS.run_dir)
    util.logging(FLAGS)
    tf.logging.info('[Step 1]: Define parameters for the models')
    util.report_param(FLAGS)

    ################################
    # Step 2: Define input dataset #
    ################################
    tf.logging.info('[Step 2]: Define input dataset')
    if FLAGS.data_type == 'random_signal':
        x, y = util.random_signal(length=FLAGS.data_length)

    # Save the data into a numpy array
    np.savez_compressed(os.path.join(FLAGS.run_dir, 'input-data'), x=x, y=y)
    plt.plot(x, y)
    plt.savefig(os.path.join(FLAGS.run_dir, 'input-timeseries.jpg'))

    for i in tqdm(range(FLAGS.iterations)):  # train the model iterations
        pass
    print("learning rate", FLAGS.learning_rate)
Example #5
    def save_gfx(self, path, data=None):
        if data is None:
            print('got data')
            data = self.get_data()

        for k, v in data.items():
            if type(v) is dict or type(v) is OrderedDict:
                self.save_gfx(path, v)
            elif k == 'icon':
                icon_path = data['icon'].split('/')

                if icon_path[0] not in self.modlist.path_map:
                    print('Unknown content path %s for %s/%s' %
                          (icon_path[0], data['type'], data['name']))
                    continue

                icon_path[0] = self.modlist.path_map[icon_path[0]]
                icon_path = '/'.join(icon_path)

                if 'type' not in data:
                    # attempt to extract name and type from filepath
                    path_els = v[:v.rindex('.')].split('/')
                    itm_type, name = path_els[-2:]
                else:
                    itm_type, name = data['type'], data['name']

                out_dir = '%s/%s' % (path, itm_type)
                out_path = '%s/%s.png' % (out_dir, name)
                mkdir_p(out_dir)

                if os.path.exists(out_path):
                    print('Overwriting %s/%s' % (itm_type, name))

                shutil.copy2(icon_path, out_path)
Example #6
    def save_gfx(self, path, data=None):
        if data is None:
            print('got data')
            data = self.get_data()

        for k, v in data.items():
            if type(v) is dict:
                self.save_gfx(path, v)
            elif k == 'icon':
                icon_path = data['icon'].split('/')

                if icon_path[0] not in self.modlist.path_map:
                    print('Unknown content path %s for %s/%s' % (icon_path[0], data['type'], data['name']))
                    continue

                icon_path[0] = self.modlist.path_map[icon_path[0]]
                icon_path = '/'.join(icon_path)

                out_dir = '%s/%s' % (path, data['type'])
                out_path = '%s/%s.png' % (out_dir, data['name'])
                mkdir_p(out_dir)

                if os.path.exists(out_path):
                    print('Overwriting %s/%s' % (data['type'], data['name']))

                shutil.copy2(icon_path, out_path)
Example #7
	def mutate(self):
		"""
		Performs the mutation.  Applies mutation operator to each source file,
		then stores a diff between the original and mutated file.

		# mutants = # source files x # mutation operators
		"""
		count = 0
		start = time()

		for src_file in self.project.source_files():
			original_path = join(self.project.settings["source_path"], src_file)
			mutant_path = join(out_dir, src_file)
			mkdir_p(dirname(mutant_path))

			for (op, invoke) in self.project.settings["mutants"].items():
				if invoke:
					p = Popen(["txl", original_path, join("vendor", "conman", "%s.Txl" % op)], stdout=open(mutant_path, "w"), stderr=open("/dev/null", "w"))
					self.store.put(diff(relpath(original_path), relpath(mutant_path)), op, src_file)
					count += 1

					if count % 1000 == 0:
						print("Generated %d mutants.  Elapsed time %.02f seconds." % (count, (time() - start)))

		stop = time()
		print("Generated %d mutants in %d seconds." % (count, (stop - start)))
Example #8
    def install_base(self):
        """
        Install pathogen.vim and create vimpyre directory.

            >>> bat = Bat()
            >>> bat.install_base()
            => => Send a bat to catch pathogen.vim ...
            Catch done! Please add the following message to your .vimrc:
            execute pathogen#infect('bundle/{}', 'vimpyre/{}')
        """
        try:
            console('=> => Send a bat to catch pathogen.vim ...')
            raw_urlopen = urllib.urlopen(self.pathogen_url)
            if raw_urlopen.getcode() == 200:
                util.mkdir_p(self.AUTOLOAD_PATH)
                util.mkdir_p(self.VIMPYRE_PATH)
                raw_pathogen = raw_urlopen.read()
                pathogen = path.join(self.AUTOLOAD_PATH, 'pathogen.vim')
                with open(pathogen, 'w') as f:
                    f.write(raw_pathogen)
                console('Catch done! Please add the following to your .vimrc:')
                console("execute pathogen#infect('bundle/{}', 'vimpyre/{}')")
            else:
                console('Pathogen vimscript not found in %s' % self.pathogen_url)
                console('You can change this url with the environment variable VIM_PATHOGEN_URL')
                console('Catch fail! Please try again!')
        except:
            console('[Unexpected Error] Catch fail! Please try again!')
Example #9
    def write_file(self, path):
        if not hasattr(self, 'cached_obj') or self.kobj._always_regenerate:
            self.render()

        if self.cached_obj is None:
            return

        if not hasattr(self, 'cached_yaml'):
            self.yaml()

        if self.uses_namespace:
            path = os.path.join(path, self.namespace_name)
            mkdir_p(path)

        self.filedir = path
        self.filename = self.identifier + '.yaml'

        self.debug(3, "writing file {}/{}".format(self.filedir, self.filename))

        sav_context = var_types.VarContext.current_context
        var_types.VarContext.current_context = {'confidential': False}
        try:
            content = str(self.cached_yaml)
            self.is_confidential = var_types.VarContext.current_context['confidential']
        finally:
            var_types.VarContext.current_context = sav_context

        if self.is_confidential:
            self.debug(3, "  file {}/{} is confidential".format(self.filedir, self.filename))

        with open(os.path.join(path, '.' + self.identifier + '.tmp'), 'w') as f:
            f.write(content)
        os.rename(os.path.join(path, '.' + self.identifier + '.tmp'),
                  os.path.join(path, self.filename))
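
The closing write-to-.tmp-then-os.rename sequence in write_file is the standard pattern for replacing a file atomically, so a reader never observes a half-written YAML file. The same idea condensed into a standalone sketch (the names here are illustrative, not from the project):

    import os

    def atomic_write(path, content):
        # Stage the content in a hidden temp file in the same directory,
        # then rename over the target; on POSIX the rename is atomic.
        tmp = os.path.join(os.path.dirname(path) or '.',
                           '.' + os.path.basename(path) + '.tmp')
        with open(tmp, 'w') as f:
            f.write(content)
        os.rename(tmp, path)
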
Example #10
    def _write_output_clustered(self):
        with self.confidential(self.base) as confidential:
            for c in self.repository.get_clusters():
                path = os.path.join(self.base, c)
                mkdir_p(path)

                ns_done = set()
                for ns in self.clusterless:
                    ns_done.add(ns)
                    outputs = []
                    outputs.extend(self.clusterless[ns].values())
                    if c in self.clustered and ns in self.clustered[c]:
                        outputs.extend(self.clustered[c][ns].values())

                    if any(map(lambda x: x.has_data() and not x.is_namespace, outputs)):
                        for op in outputs:
                            op.write_file(path)
                            confidential.add_file(op)

                if c not in self.clustered:
                    continue

                for ns in self.clustered[c]:
                    if ns in ns_done:
                        continue

                    if any(map(lambda x: x.has_data() and not x.is_namespace, self.clustered[c][ns].values())):
                        for op in self.clustered[c][ns].values():
                            op.write_file(path)
                            confidential.add_file(op)
Example #11
    def write_binary(self, outdir, opp=True):
        """Write particle to LabView binary file in outdir
        """
        if not self.has_data():
            return

        # Might have julian day, might not
        root = os.path.join(outdir, os.path.dirname(self.file_id))
        util.mkdir_p(root)
        outfile = os.path.join(outdir, self.file_id)
        if opp:
            outfile += ".opp"
        if os.path.exists(outfile):
            os.remove(outfile)
        if os.path.exists(outfile + ".gz"):
            os.remove(outfile + ".gz")

        with open(outfile, "wb") as fh:
            # Write 32-bit uint particle count header
            header = np.array([self.particle_count], np.uint32)
            header.tofile(fh)

            # Write particle data
            self._create_particle_matrix().tofile(fh)

        util.gzip_file(outfile)
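
Reading one of these files back mirrors the write: a 32-bit uint particle count header, then the particle matrix. A sketch of a reader, where the column count and dtype are assumptions (the real layout is defined by _create_particle_matrix):

    import numpy as np

    def read_binary(path, n_cols, dtype=np.float32):
        with open(path, "rb") as fh:
            # 32-bit uint particle count header, then the raw matrix
            count = int(np.fromfile(fh, dtype=np.uint32, count=1)[0])
            data = np.fromfile(fh, dtype=dtype).reshape(count, n_cols)
        return data
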
Example #12
    def __init__(self):
        self.settings = {
            'workdir':
            PaperworkSetting("Global", "WorkDirectory",
                             lambda: os.path.expanduser("~/papers")),
            'index_version':
            PaperworkSetting("Global", "IndexVersion", lambda: "-1"),
        }

        self._configparser = None

        # Possible config files are evaluated in the order they are in the
        # array. The last one of the list is the default one.
        configfiles = [
            "./paperwork.conf",
            os.path.expanduser("~/.paperwork.conf"),
            ("%s/paperwork.conf" %
             (os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))))
        ]

        configfile_found = False
        for self.__configfile in configfiles:
            if os.access(self.__configfile, os.R_OK):
                configfile_found = True
                logger.info("Config file found: %s", self.__configfile)
                break
        if not configfile_found:
            logger.info("Config file not found. Will use '%s'",
                        self.__configfile)
        util.mkdir_p(os.path.dirname(self.__configfile))
Example #13
def filter_tags(tracks, tags, artist_threshold, directory=None):
    if directory is not None:
        util.mkdir_p(directory)

    tags_new_all = {}
    for category in tags:
        stats, total = statistics.get_statistics(category, tracks, tags)
        stats_filtered = stats[stats['artists'] >= artist_threshold]
        if directory is not None:
            statistics.write_statistics(category, stats_filtered, directory)

        tags_new_all[category] = set(stats_filtered['tag'])
        print("- {} tags: {} -> {}".format(category, len(stats),
                                           len(stats_filtered)))

    # TODO: replace with filter_subset
    tracks_to_delete = []
    for track_id, track in tracks.items():
        total_tags = 0
        for category, tags_new in tags_new_all.items():
            track[category] &= tags_new
            total_tags += len(track[category])
        if total_tags == 0:
            tracks_to_delete.append(track_id)

    for track in tracks_to_delete:
        tracks.pop(track)
Example #14
def setup_experiment(args, config):
  # Grab parameters
  if args.exp_name:
    config.exp_name = args.exp_name
  # elif not hasattr(config, 'exp_name'):
  #   config.exp_name = exp_lifecycle.guess_config_name(config)

  if not hasattr(config, 'results_dir'):
    config.results_dir = "experiments/%s" % config.exp_name

  if args.timestamp_results is not None:
    # Note that argparse returns a list
    config.timestamp_results = args.timestamp_results

  if hasattr(config, 'timestamp_results') and config.timestamp_results:
    now = timestamp_string()
    config.results_dir += "_" + str(now)

  # Set up results directory
  mkdir_p("./experiments")
  create_clean_dir(config.results_dir)

  # Make sure that there are no uncommited changes
  if args.publish:
    exp_lifecycle.publish_prepare(config.exp_name, config.results_dir)

  # Record machine information for this experiment
  additional_metadata = None
  if hasattr(config, "get_additional_metadata"):
    additional_metadata = config.get_additional_metadata()

  exp_lifecycle.dump_metadata("%s/metadata" % config.results_dir,
                              additional_metadata=additional_metadata)
Example #15
    def _write_output_clustered(self):
        changed = []
        with self.confidential(self.base) as confidential:
            for c in self.repository.get_clusters():
                path = os.path.join(self.base, c)
                is_openshift = (self.repository.is_openshift or
                                self.repository.get_cluster_info(c).is_openshift)
                mkdir_p(path)

                ns_done = set()
                for ns in self.clusterless:
                    ns_done.add(ns)
                    outputs = []
                    outputs.extend(self.clusterless[ns].values())
                    if c in self.clustered and ns in self.clustered[c]:
                        outputs.extend(self.clustered[c][ns].values())

                    changed.extend(
                        self._write_namespace(outputs, path, confidential,
                                              is_openshift))

                if c not in self.clustered:
                    continue

                for ns in self.clustered[c]:
                    if ns in ns_done:
                        continue

                    changed.extend(
                        self._write_namespace(self.clustered[c][ns].values(),
                                              path, confidential,
                                              is_openshift))
        return changed
Example #16
def build(args):
    use_cuda = 'cuda' in args.features
    use_cl = 'cl' in args.features
    use_cudnn = 'cudnn' in args.features
    use_eigen = 'eigen' in args.features
    use_blas = 'openblas' in args.features
    use_nnpack = 'nnpack' in args.features
    use_protobuf = 'protobuf' in args.features
    use_opencv = 'opencv' in args.features
    build_examples = 'examples' in args.features
    build_tools = 'tools' in args.features
    build_service = 'service' in args.features
    build_test = 'test' in args.features
    build_shared_libs = 'shared' in args.features

    cmake_options = {}
    if use_cuda:
        cmake_options['USE_CUDA'] = True
        cmake_options['USE_CL'] = False
        cmake_options['USE_CUDNN'] = use_cudnn
    elif use_cl:
        cmake_options['USE_CUDA'] = False
        cmake_options['USE_CL'] = True
    else:
        cmake_options['USE_CUDA'] = False
        cmake_options['USE_CL'] = False
        cmake_options['USE_Eigen'] = use_eigen
        cmake_options['USE_BLAS'] = use_blas
        cmake_options['USE_NNPACK'] = use_nnpack
    cmake_options['USE_Protobuf'] = use_protobuf
    cmake_options['USE_OpenCV'] = use_opencv
    cmake_options['BUILD_EXAMPLES'] = build_examples
    cmake_options['BUILD_TOOLS'] = build_tools
    cmake_options['BUILD_SERVICE'] = build_service
    cmake_options['BUILD_TEST'] = build_test
    cmake_options['BUILD_SHARED_LIBS'] = build_shared_libs

    shadow_root = os.path.dirname(os.path.abspath(__file__)) + '/../..'
    build_root = shadow_root + '/build/' + args.subdir

    if not os.path.isdir(build_root):
        util.mkdir_p(build_root)

    shell_cmd = 'cd ' + build_root + ' && '
    shell_cmd += 'cmake ../.. '
    choices = ['OFF', 'ON']
    for define in args.define:
        shell_cmd += '-D' + define + ' '
    for (feature, value) in cmake_options.items():
        shell_cmd += '-D' + feature + '=' + choices[int(value)] + ' '
    if args.generator == 'make':
        shell_cmd += '&& make -j2'
    elif args.generator == 'ninja':
        shell_cmd += '-GNinja && ninja'

    if args.debug != 'nodebug':
        print(shell_cmd + '\n' + ''.join(['='] * 60))

    subprocess.check_call(shell_cmd, shell=True)
Example #17
def compute_statistics(tracks, tags, directory):
    util.mkdir_p(directory)

    for category in tags:
        data, total = get_statistics(category, tracks, tags)
        write_statistics(category, data, directory)
        print('Total tags for {}: {} tags, {}'.format(category, len(data),
                                                      total))
Example #18
def save_prefs(prefs):
    util.mkdir_p(storage_path)
    data = json.dumps(prefs, indent=4)
    try:
        with open(_prefs_file(), 'w') as f:
            f.write(data)
    except IOError as e:
        log.warning('Error saving %s: %s', _prefs_file(), e)
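
A matching loader would invert the same steps; a hypothetical counterpart that reuses the _prefs_file helper and the json import assumed above:

    def load_prefs():
        # Return the saved prefs, or {} if the file is missing or unparsable.
        try:
            with open(_prefs_file()) as f:
                return json.load(f)
        except (IOError, ValueError):
            return {}
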
Example #19
 def _write_output_clusterless(self):
     mkdir_p(self.base)
     with self.confidential(self.base) as confidential:
         for ns in self.clusterless:
             if any(map(lambda x: x.has_data() and not x.is_namespace, self.clusterless[ns].values())):
                 for op in self.clusterless[ns].values():
                     op.write_file(self.base)
                     confidential.add_file(op)
Example #20
    def write_file(self, path):
        if not hasattr(self, 'cached_obj') or self.kobj._always_regenerate:
            self.render()

        if self.cached_obj is None:
            return

        if not hasattr(self, 'cached_yaml'):
            self.yaml()

        if self.uses_namespace:
            path = os.path.join(path, self.namespace_name)
            mkdir_p(path)

        self.filedir = path
        self.filename = self.filename_conversion(self.identifier) + '.yaml'

        self.debug(3, "writing file {}/{}".format(self.filedir, self.filename))

        sav_context = var_types.VarContext.current_context
        var_types.VarContext.current_context = {'confidential': False}
        try:
            content = str(self.cached_yaml)
            self.is_confidential = var_types.VarContext.current_context[
                'confidential']
        finally:
            var_types.VarContext.current_context = sav_context

        if self.is_confidential:
            self.debug(
                3,
                "  file {}/{} is confidential".format(self.filedir,
                                                      self.filename))

        changed = False
        if self.content_check is not None and self.content_check in (
                'contents', 'yaml', 'exists'):
            try:
                with open(os.path.join(path, self.filename), 'rb') as f:
                    if self.content_check == 'contents':
                        if content != f.read().decode('utf8'):
                            changed = True
                    elif self.content_check == 'yaml':
                        if yaml_load(content) != yaml_load(
                                f.read().decode('utf8')):
                            changed = True
            except:
                changed = True

        with open(os.path.join(path, '.' + self.identifier + '.tmp'),
                  'w') as f:
            f.write(content)
        os.rename(os.path.join(path, '.' + self.identifier + '.tmp'),
                  os.path.join(path, self.filename))

        if changed:
            return os.path.join(path, self.filename)
        return None
Example #21
    def _aligin(input, output):
        _padding = hop if padding else 0
        length = hop * ext_n * ext_d + (windowsize - hop)

        filename, wav, next_wav = None, [], []
        curr_dir_n, curr_samp_n, curr_dir = -1, file_max + 1, os.path.join(
            output, '0')
        for root, dirs, files in os.walk(input):
            for f in files:
                if f.endswith('.wav'):
                    if filename is None:
                        filename = f

                    y, sr = librosa.load(os.path.join(root, f), sr=samplerate)
                    wav.extend(y)
                    if length > len(wav) + _padding * 2:
                        if concat:
                            continue
                        if too_short == 'append_0':
                            n = length - (len(wav) + _padding * 2)
                            wav += [0 for _ in range(n)]
                        elif too_short == 'discard':
                            filename, wav = None, []
                            continue
                        else:
                            raise ValueError('%s is not supported' % too_short)
                    elif length < len(wav) + _padding * 2:
                        if overlap:
                            surplus = (len(wav) + _padding * 2 -
                                       windowsize) % hop
                        else:
                            surplus = (len(wav) + _padding * 2) % length
                        if surplus != 0:
                            if concat:
                                next_wav = wav[-surplus:]
                                del wav[-surplus:]
                            elif too_long == 'append_0':
                                comp = hop - surplus if overlap else length - surplus
                                wav += [0 for _ in range(comp)]
                            elif too_long == 'cut':
                                del wav[-surplus:]
                            else:
                                raise ValueError('%s is not supported' %
                                                 too_long)

                    if curr_samp_n > file_max:
                        curr_dir_n, curr_samp_n = curr_dir_n + 1, 0
                        curr_dir = os.path.join(output, str(curr_dir_n))
                        util.mkdir_p(curr_dir)
                    path = os.path.join(curr_dir, filename[:-4] + ".voice.wav")
                    librosa.output.write_wav(path, np.array(wav), samplerate)

                    curr_samp_n += 1
                    if concat:
                        filename, wav = None, next_wav
                        next_wav = []
                    else:
                        filename, wav = None, []
Example #22
 def write_vct(self, outdir):
     outfile = os.path.join(outdir, self.file_id + ".vct")
     util.mkdir_p(os.path.dirname(outfile))
     if os.path.exists(outfile):
         os.remove(outfile)
     if os.path.exists(outfile + ".gz"):
         os.remove(outfile + ".gz")
     with gzip.open(outfile + ".gz", "wb") as f:
         f.write("\n".join(self.vct) + "\n")
Example #23
 def install_mpqueue_as(self, mpqueue, task_index):
     self._MPQ = mpqueue
     self._task_index = task_index
     self._logfile = open(self.args.ckptdir + '{}.out'.format(task_index),
                          'w')
     self._sample_dumpdir = self.args.ckptdir + "sample-{}/".format(
         task_index)
     util.mkdir_p(self._sample_dumpdir)
     self._sample_index = 0
Example #24
 def __init__(self, settings, compiler):
     self._settings = settings
     self.compiler_name = settings.get('codeblocks').get('compiler_name', 'gcc')
     self.projectsdir = settings.get('projectsdir')
     util.mkdir_p(self.projectsdir)
     self.configurations = compiler.get_configurations()
     cvars = compiler.get_global_variables()
     self.cflags = cvars['cflags']
     self.lflags = cvars['lflags']
Example #25
 def write_vct(self, outdir):
     outfile = os.path.join(outdir, self.file_id + ".vct")
     util.mkdir_p(os.path.dirname(outfile))
     if os.path.exists(outfile):
         os.remove(outfile)
     if os.path.exists(outfile + ".gz"):
         os.remove(outfile + ".gz")
     with gzip.open(outfile + ".gz", "wb") as f:
         f.write("\n".join(self.vct) + "\n")
Example #26
def torus_master():
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    g = tf.Graph()
    util.mkdir_p(ckpt_dir)
    with g.as_default():
        global_step = tf.contrib.framework.get_or_create_global_step()
        increment_global_step = tf.assign_add(global_step,
                                              1,
                                              name='increment_global_step')
        learning_rate_input = tf.placeholder(tf.float32)
        grad_applier = RMSPropApplier(learning_rate=learning_rate_input,
                                      decay=RMSP_ALPHA,
                                      momentum=0.0,
                                      epsilon=RMSP_EPSILON,
                                      clip_norm=GRAD_NORM_CLIP,
                                      device=device)
        masterdriver = rldriver.RLDriver(MODELS,
                                         init_state,
                                         view_config,
                                         config.SV_VISCFG,
                                         config.MV_VISCFG,
                                         use_rgb=True)
        saver = tf.train.Saver(masterdriver.get_nn_args() + [global_step])
        with tf.Session() as sess:
            threads = []
            for i in range(THREAD):
                thread_args = (i, dpy, glctx, masterdriver, g, grad_applier,
                               learning_rate_input, global_step,
                               increment_global_step, sess, saver)
                thread = threading.Thread(target=torus_worker,
                                          args=thread_args)
                thread.start()
                graph_completes[i].wait()
                threads.append(thread)
            '''
            We need to run the initializer because only master's variables are stored.
            '''
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
            print('ckpt {}'.format(ckpt))
            epoch = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                epoch = sess.run(global_step)
                print('Restored!, global_step {}'.format(epoch))

            init_done.set()
            for thread in threads:
                thread.join()
            print("Saving final checkpoint")
            fn = saver.save(sess,
                            ckpt_dir + ckpt_prefix,
                            global_step=global_step)
            print("Saved checkpoint to {}".format(fn))
Example #27
def save_transform_to_database(data_transforms):
    """ Save data transforms to database"""
    # pylint: disable= too-many-locals, bare-except
    conn = sqlite3.connect(util.DB_PATH)
    conn.text_factory = str

    for img in data_transforms:
        cursor = conn.execute('''SELECT pid from Images where id = ? ''', (img.image_id,))
        pid = cursor.fetchone()[0]

        folder = util.DATA_FOLDER + str(pid) + "/registration_transforms/"
        util.mkdir_p(folder)

        transform_paths = ""
        print(img.get_transforms())
        for _transform in img.get_transforms():
            print(_transform)
            dst_file = folder + util.get_basename(_transform) + '.h5.gz'
            if os.path.exists(dst_file):
                os.remove(dst_file)
            with open(_transform, 'rb') as f_in, gzip.open(dst_file, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            transform_paths += str(pid) + "/registration_transforms/" +\
                basename(_transform) + '.h5.gz' + ", "
        transform_paths = transform_paths[:-2]

        cursor2 = conn.execute('''UPDATE Images SET transform = ? WHERE id = ?''',
                               (transform_paths, img.image_id))
        cursor2 = conn.execute('''UPDATE Images SET fixed_image = ? WHERE id = ?''',
                               (img.fixed_image, img.image_id))

        folder = util.DATA_FOLDER + str(pid) + "/reg_volumes_labels/"
        util.mkdir_p(folder)
        vol_path = util.compress_vol(img.processed_filepath)
        shutil.copy(vol_path, folder)

        volume_db = str(pid) + "/reg_volumes_labels/" + basename(vol_path)
        cursor2 = conn.execute('''UPDATE Images SET filepath_reg = ? WHERE id = ?''',
                               (volume_db, img.image_id))

        cursor = conn.execute('''SELECT filepath, id from Labels where image_id = ? ''',
                              (img.image_id,))
        for (row, label_id) in cursor:
            temp = util.compress_vol(move_vol(util.DATA_FOLDER + row,
                                              img.get_transforms(), True))
            shutil.copy(temp, folder)
            label_db = str(pid) + "/reg_volumes_labels/" + basename(temp)
            cursor2 = conn.execute('''UPDATE Labels SET filepath_reg = ? WHERE id = ?''',
                                   (label_db, label_id))

        conn.commit()
        cursor.close()
        cursor2.close()

#    cursor = conn.execute('''VACUUM; ''')
    conn.close()
Example #28
def test_rldriver_main():
    pyosr.init()
    dpy = pyosr.create_display()
    glctx = pyosr.create_gl_context(dpy)
    g = tf.Graph()
    util.mkdir_p(ckpt_dir)
    with g.as_default():
        learning_rate_input = tf.placeholder(tf.float32)
        grad_applier = RMSPropApplier(learning_rate=learning_rate_input,
                                      decay=RMSP_ALPHA,
                                      momentum=0.0,
                                      epsilon=RMSP_EPSILON,
                                      clip_norm=GRAD_NORM_CLIP,
                                      device=device)
        masterdriver = rldriver.RLDriver(MODELS,
                init_state,
                view_config,
                config.SV_VISCFG,
                config.MV_VISCFG,
                use_rgb=True)
        driver = rldriver.RLDriver(MODELS,
                    init_state,
                    view_config,
                    config.SV_VISCFG,
                    config.MV_VISCFG,
                    use_rgb=True,
                    master_driver=masterdriver,
                    grads_applier=grad_applier)
        driver.get_sync_from_master_op()
        driver.get_apply_grads_op()
        driver.learning_rate_input = learning_rate_input
        driver.a3c_local_t = 32
        global_step = tf.contrib.framework.get_or_create_global_step()
        increment_global_step = tf.assign_add(global_step, 1, name='increment_global_step')
        saver = tf.train.Saver(masterdriver.get_nn_args() + [global_step])
        last_time = time.time()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir=ckpt_dir)
            print('ckpt {}'.format(ckpt))
            epoch = 0
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                epoch = sess.run(global_step)
                print('Restored!, global_step {}'.format(epoch))
            while epoch < 100 * 1000:
                driver.train_a3c(sess)
                epoch += 1
                sess.run(increment_global_step)
                if epoch % 1000 == 0 or time.time() - last_time >= 10 * 60:
                    print("Saving checkpoint")
                    fn = saver.save(sess, ckpt_dir+ckpt_prefix, global_step=global_step)
                    print("Saved checkpoint to {}".format(fn))
                    last_time = time.time()
                print("Epoch {}".format(epoch))
Example #29
 def _write_output_clusterless(self):
     changed = []
     mkdir_p(self.base)
     with self.confidential(self.base) as confidential:
         for ns in self.clusterless:
             changed.extend(
                 self._write_namespace(self.clusterless[ns].values(),
                                       self.base, confidential,
                                       self.repository.is_openshift))
     return changed
Example #30
def init_hello_world():
    targets_filepath = 'source/hello_world/targets.json'
    helloworld_filepath = 'source/hello_world/hello_world.cpp'
    critical_error_if_file_exists(helloworld_filepath)
    critical_error_if_file_exists(targets_filepath)
    util.mkdir_p(os.path.dirname(targets_filepath))

    with open(targets_filepath, 'w+') as fd:
        fd.write(util.get_resource('defaults/targets.json'))
    with open(helloworld_filepath, 'w+') as fd:
        fd.write(util.get_resource('defaults/hello_world.cpp'))
Example #31
def get_archive_file_name(config, kind, start_dt, end_dt):
    """get the archive file name. has the format of
       {ARCHIVE_DIR}/YY-mm-dd/{kind}/kind-start_dt-end_dt.pickle
    """
    datestr = str(start_dt.date())
    dirname = "%s/%s/%s" % (config['archive_dir'], datestr, kind)
    mkdir_p(dirname)
    filename = "%s/%s-%s-%s-%s-%s.pickle" % (dirname, kind,
        str(start_dt.date()), str(start_dt.time()),
        str(end_dt.date()), str(end_dt.time()))
    return filename
Example #32
def init_hello_world():
    targets_filepath = 'source/hello_world/targets.json'
    helloworld_filepath = 'source/hello_world/hello_world.cpp'
    critical_error_if_file_exists(helloworld_filepath)
    critical_error_if_file_exists(targets_filepath)
    util.mkdir_p(os.path.dirname(targets_filepath))

    with open(targets_filepath, 'w+') as fd:
        fd.write(util.get_resource('defaults/targets.json'))
    with open(helloworld_filepath, 'w+') as fd:
        fd.write(util.get_resource('defaults/hello_world.cpp'))
Example #33
def write_tokens(start_dt, end_dt):
    # Write token file at end of main. gae_download.py runs under an hourly
    # cronjob, so load_emr_daily.sh will start only when all 24 token files
    # are present.
    datestr = str(start_dt.date())
    dirname = "/home/analytics/kabackup/daily_new/"
    dirname += "%s/tokens/" % datestr
    mkdir_p(dirname)
    filename = "token-%s-%s.txt" % (start_dt.time(), end_dt.time())
    f = open(dirname + filename, "w")
    f.close()
Example #34
def update_file(file_disk_path, file_download_path):
    if os.path.isfile(file_disk_path):
        file_disk_md5 = util.get_file_md5(file_disk_path)
        file_download_md5 = util.get_file_md5(file_download_path)
        if file_disk_md5 != file_download_md5:
            shutil.copy(file_download_path, file_disk_path)
    else:
        file_disk_dir = os.path.dirname(file_disk_path)
        if not os.path.isdir(file_disk_dir):
            util.mkdir_p(file_disk_dir)
        shutil.copy(file_download_path, file_disk_path)
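
update_file relies on a util.get_file_md5 helper that is not shown; a plausible hashlib-based sketch (the project's real helper may differ):

    import hashlib

    def get_file_md5(path, chunk_size=1 << 20):
        # Hash the file in chunks so large files never have to fit in memory.
        md5 = hashlib.md5()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                md5.update(chunk)
        return md5.hexdigest()
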
Example #35
def get_archive_file_name(config, kind, start_dt, end_dt):
    """get the archive file name. has the format of
       {ARCHIVE_DIR}/YY-mm-dd/{kind}/kind-start_dt-end_dt.pickle
    """
    datestr = str(start_dt.date())
    dirname = "%s/%s/%s" % (config['archive_dir'], datestr, kind)
    mkdir_p(dirname)
    filename = "%s/%s-%s-%s-%s-%s.pickle" % (dirname, kind, str(
        start_dt.date()), str(start_dt.time()), str(
            end_dt.date()), str(end_dt.time()))
    return filename
Example #36
def plotROC(y_true, y_pred, title, path=None):
    """ Inputs: Arrays containing k arrays of true and continous predicted y values for 1 fold
		Outputs: An ROC plot saved at the location given in path relative the the wd
	"""
    plt.gcf().clear()
    fig = plt.figure()
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 100)

    colors = itertools.cycle([
        'cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange', 'red',
        'pink', 'purple', 'chocolate'
    ])
    lw = 2
    k = 0
    for true, scores, color in zip(y_true, y_pred, colors):
        fpr, tpr, thresholds = roc_curve(true, scores)
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr,
                 tpr,
                 lw=lw,
                 color=color,
                 label='ROC fold %d (area = %0.2f)' % (k, roc_auc))
        k += 1

    plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k', label='Luck')

    mean_tpr /= len(y_true)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr,
             mean_tpr,
             color='g',
             linestyle='--',
             label='Mean ROC (area = %0.2f)' % mean_auc,
             lw=lw)

    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    #plt.axis("equal")
    plt.title('\n'.join(wrap(title, 60)))
    leg = plt.legend(bbox_to_anchor=(1, 0.815), loc='center left', numpoints=1)
    fig.set_tight_layout(True)
    if path is None:
        plt.show()
    else:
        utilities.mkdir_p(path)
        plt.savefig(path, bbox_inches='tight')
        plt.close("all")
Example #37
def generate(ninja_targets, settings):
    project_name = settings.get('project_name')
    working_dir = '$rootdir'

    make_all = BuildSystem('make - ' + project_name, 'make build', working_dir)
    make_all.add_variant('All', 'make all')
    make_all.add_variant('Clean', 'make clean')
    make_all.add_variant('Sublime Text Project', 'make sublime')
    make_all.add_variant('Doxygen Documentation', 'make doxygen')
    ninja_all = BuildSystem('ninja - ' + project_name, 'ninja', working_dir)
    ninja_all.add_variant('All', 'ninja all')
    ninja_all.add_variant('Clean', 'ninja -t clean')
    ninja_all.add_variant('Doxygen Documentation', 'ninja doxygen')

    build_systems = [make_all.data, ninja_all.data]

    for target in ninja_targets.build_targets:
        build_target = BuildSystem(
            target.config + ' - ' + target.name,
            'ninja %s' % target.phony_name,
            working_dir)
        bindir = get_bin(target.config, settings)
        binary = Path.join(working_dir, bindir, target.name)
        build_target.add_variant(
            'Run',
            'ninja %s && %s' % (target.phony_name, binary))
        build_target.add_variant(
            'Clean',
            'ninja -t clean %s' % target.phony_name)
        build_systems.append(build_target.data)

    sublime_settings = settings.get('sublime')

    template_filename = settings.expand_variables(sublime_settings.get('project_template', None))
    if template_filename is not None:
        with open(template_filename, 'r') as fd:
            template = fd.read()
        if not template:
            critical_error('%s seems to be empty', template_filename)
    else:
        template = util.get_resource('defaults/tmpl.sublime-project')

    project = re.sub(
        r'(\$build_systems)',
        json.dumps(build_systems, indent=4, separators=(',', ': ')),
        template)
    project = settings.expand_variables(project)

    filename = settings.expand_variables('$projectsdir/$project_name.sublime-project')
    util.mkdir_p(os.path.dirname(filename))
    with open(filename, 'w+') as out:
        out.write(project)
Example #38
def test_prerequesites():
    '''check we have the right directories and tools to run tests'''
    print("Testing prerequesites")
    util.mkdir_p(util.reltopdir('../buildlogs'))
    if not os.path.exists(util.reltopdir('../HILTest/hil_quad.py')):
        print('''
You need to install HILTest in %s

You can get it from git://git.samba.org/tridge/UAV/HILTest.git

        ''' % util.reltopdir('../HILTest'))
        return False
    return True
Example #39
    def assertFileContentsMatch(self, filename, expected):
        # overridden from shared/testutil/testcase.py
        # to conditionally regenerate expected output
        # (see comments on _RECREATE_EXPECTED_RESULTS)
        if not _RECREATE_EXPECTED_RESULTS:
            return super(TestBuildBundleBase,
                         self).assertFileContentsMatch(filename, expected)

        target_path = os.path.join(self.real_ka_root, 'kake',
                                   'compile_js_bundles-testfiles', expected)
        util.mkdir_p(os.path.dirname(target_path))
        with open(target_path, 'w') as fout:
            fout.write(self._file_contents(filename))
Example #40
def audio_url_for_file(audio_track, uploaded=False):

    if uploaded:
        wav_path = os.path.join(UPLOAD_FOLDER, audio_track)
    else:
        util.mkdir_p(os.path.join('data', 'wav'))
        wav_path = os.path.join('data', 'wav', str(audio_track.id) + ".wav")

    if not os.path.isfile(wav_path):
        data, rate = librosa.load(audio_track.path)
        librosa.output.write_wav(wav_path, data, rate)

    return os.path.join('play', wav_path)
Example #41
def create_inventory_id_and_tags(ec2_conf, ec2_info):
    # mock the inventory and ids that should be generated by vagrant
    inventory_dir = '.vagrant/provisioners/ansible/inventory'
    mkdir_p(inventory_dir)
    inventory = open(os.path.join(inventory_dir, 'vagrant_ansible_inventory'), 'w')

    machine_dir = '.vagrant/machines'
    mkdir_p(machine_dir)

    tag = ec2_conf['Tag']
    tags = {}
    for i, instance in enumerate(ec2_info['instances']):
        host = "TachyonMaster" if i == 0 else "TachyonWorker{id}".format(id=i)
        inventory.write("{host} ansible_ssh_host={ip} ansible_ssh_port=22\n".format(host=host, ip=instance['public_ip']))

        instance_id = str(instance['id'])
        id_dir = os.path.join(machine_dir, host, 'aws')
        mkdir_p(id_dir)
        with open(os.path.join(id_dir, 'id'), 'w') as f:
            f.write(instance_id)

        tags[instance_id] = '{tag}-{host}'.format(tag=tag, host=host)

    inventory.close()

    # map instance id to its tag
    spot_tag_dir = 'spot/roles/tag_ec2/vars'
    mkdir_p(spot_tag_dir)
    tag_vars = open(os.path.join(spot_tag_dir, 'main.yml'), 'w')
    var = {
        "tags": tags,
        "region": ec2_conf["Region"],
    }
    yaml.dump(var, tag_vars, default_flow_style=False)
    tag_vars.close()
Example #42
def filter_cruise(host_assignments, output_dir, process_count=16):
    cruises = [x[0] for x in host_assignments[env.host_string]]
    cruise_results = {}
    workdir = "/mnt/raid"
    with cd(workdir):
        for c in cruises:
            puts("Filtering cruise {}".format(c))
            with hide("commands"):
                run("mkdir {}".format(c))
            with cd(c):
                text = {"cruise": c, "process_count": process_count}
                with settings(warn_only=True), hide("output"):
                    #result = run("seaflowpy_filter --s3 -c {cruise} -d {cruise}.db -l 10 -p 2 -o {cruise}_opp".format(**text))
                    result = run(
                        "seaflowpy_filter --s3 -c {cruise} -d {cruise}.db -t -p {process_count} -o {cruise}_opp".format(**text),
                        timeout=10800
                    )
                    cruise_results[c] = result

            puts(result)

            cruise_output_dir = os.path.join(output_dir, c)

            if result.succeeded:
                puts("Filtering successfully completed for cruise {}".format(c))
                puts("Returning results for cruise {}".format(c))
                util.mkdir_p(cruise_output_dir)
                rsyncout = execute(
                    # rsync files in cruise results dir to local cruise dir
                    rsync_get,
                    os.path.join(workdir, c) + "/",
                    cruise_output_dir,
                    hosts=[env.host_string]
                )
                # Print rsync output on source host, even though this is run
                # on local
                puts(rsyncout[env.host_string])
            else:
                sys.stderr.write("Filtering failed for cruise {}\n".format(c))

            # Always write log output
            util.mkdir_p(cruise_output_dir)
            logpath = os.path.join(cruise_output_dir, "seaflowpy_filter.{}.log".format(c))
            with open(logpath, "w") as logfh:
                logfh.write("command={}\n".format(result.command))
                logfh.write("real_command={}\n".format(result.real_command))
                logfh.write(result + "\n")

    return cruise_results
Example #43
 def download_and_save(self):
     mkdir_p(self.chan, abort=True, msg="Couldn't create directory for chan")
     mkdir_p("%s/%s" % (self.chan, self.boardname), abort=True, msg="Couldn't create directory for board")
     catalog = self.get_catalog(False)
     i = 0
     for thread in catalog:
         i += 1
         if not self.suppressed_output:
             print "Downloading thread %d (%d/%d)" % (thread, i, len(catalog))
         filename = "%s/%s/%d.json" % (self.chan, self.boardname, thread)
         connection = httplib.HTTPSConnection(self.host)
         with open(filename, "w") as file:
             connection.request("GET", self.get_thread_json_url(thread))
             file.write(connection.getresponse().read())
         connection.close()
Example #44
def mock_vagrant_info(instance_id_to_tag_ip):
    inventory_dir = '.vagrant/provisioners/ansible/inventory'
    mkdir_p(inventory_dir)
    inventory = open(os.path.join(inventory_dir, 'vagrant_ansible_inventory'), 'w')
    for instance_id, tag_ip in instance_id_to_tag_ip.iteritems():
        tag, ip = tag_ip
        host = get_host(tag)

        inventory.write("{} ansible_ssh_host={} ansible_ssh_port=22\n".format(host, ip))

        id_dir = os.path.join('.vagrant', 'machines', host, 'aws')
        mkdir_p(id_dir)
        with open(os.path.join(id_dir, 'id'), 'w') as f:
            f.write(instance_id)
    inventory.close()
Example #45
def get_archive_file_name(config, kind, start_dt, end_dt, ftype='pickle'):
    """Get the archive file name. has the format of
    {ARCHIVE_DIR}/YY-mm-dd/{kind}/kind-start_dt-end_dt.pickle

    """

    # Note that Hadoop does not like leading underscores in files, so we strip
    # out leading underscores (as may be used in the case of private classes)
    kind = re.sub(r'^_*', '', kind)

    datestr = str(start_dt.date())
    dirname = "%s/%s/%s" % (config['archive_dir'], datestr, kind)
    mkdir_p(dirname)
    filename = "%s/%s-%s-%s-%s-%s.%s" % (dirname, kind,
        str(start_dt.date()), str(start_dt.time()),
        str(end_dt.date()), str(end_dt.time()), ftype)
    return filename
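
For a quick illustration of the resulting path format (the config values and datetimes below are made up, and the call creates the directory as a side effect):

    import datetime

    config = {'archive_dir': '/tmp/archive'}
    start_dt = datetime.datetime(2014, 1, 2, 3, 0, 0)
    end_dt = datetime.datetime(2014, 1, 2, 4, 0, 0)
    # Leading underscores are stripped from kind, so '_ProblemLog' yields:
    # /tmp/archive/2014-01-02/ProblemLog/ProblemLog-2014-01-02-03:00:00-2014-01-02-04:00:00.pickle
    print(get_archive_file_name(config, '_ProblemLog', start_dt, end_dt))
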
Example #46
def link_scripts(dest_dir, src_dir=None):
    original_scripts_dir = src_dir or util.get_installed_scripts_dir()

    scripts = [
        (s['name'], get_new_script_path(s['module'], s['function']))
        for s in get_console_scripts_info()
    ]

    for (original, new_path) in scripts:
        original_path = os.path.join(original_scripts_dir, original)
        new_path = os.path.join(dest_dir, new_path)

        util.mkdir_p(os.path.dirname(new_path))

        print 'Symlinking %s to %s ...' % (original_path, new_path)
        if os.path.lexists(new_path):
            os.remove(new_path)
        os.symlink(original_path, new_path)
Example #47
    def install(self):
        console('=> => Send a bat to catch %s' % self.CURR_SCRIPT)

        try:
            ret = self._check_name()
            if ret:
                fetch_url = self._render_fetch_url(ret)
                cmd_fetch = 'git clone --depth 1 %s' % fetch_url
                util.mkdir_p(self.VIMPYRE_PATH)
                with util.cd(self.VIMPYRE_PATH):
                    system(cmd_fetch)
            else:
                msg = ('%s not found! Please use `vimpyre search <vim-script>`'
                       ' to check the script name and install again!' %
                       self.CURR_SCRIPT)
                console(msg)
        except:
            self.install_base()
Example #48
def convert_and_save_dataset(pid, cursor, image_type, volume_labels, volume, glioma_grade):
    """convert_and_save_dataset"""
    util.mkdir_p(util.DATA_FOLDER + str(pid))
    img_out_folder = util.DATA_FOLDER + str(pid) + "/volumes_labels/"
    util.mkdir_p(img_out_folder)

    cursor.execute('''SELECT pid from Patient where pid = ?''', (pid,))
    exist = cursor.fetchone()
    if exist is None:
        cursor.execute('''INSERT INTO Patient(pid, glioma_grade) VALUES(?, ?)''', (pid, glioma_grade))

    cursor.execute('''INSERT INTO Images(pid, modality, diag_pre_post) VALUES(?,?,?)''',
                   (pid, 'MR', image_type))
    img_id = cursor.lastrowid

    _, file_extension = os.path.splitext(volume)
    volume_temp = "volume" + file_extension
    shutil.copy(volume, volume_temp)

    volume_out = img_out_folder + str(pid) + "_" + str(img_id) + "_MR_T1_" + image_type + ".nii.gz"
    print("--->", volume_out)
    os.system(DWICONVERT_PATH + " --inputVolume " + volume_temp + " -o " + volume_out + " --conversionMode NrrdToFSL")
    volume_out_db = volume_out.replace(util.DATA_FOLDER, "")
    cursor.execute('''UPDATE Images SET filepath = ? WHERE id = ?''', (volume_out_db, img_id))
    os.remove(volume_temp)

    for volume_label in volume_labels:
        _, file_extension = os.path.splitext(volume_label)
        volume_label_temp = "volume_label" + file_extension
        shutil.copy(volume_label, volume_label_temp)

        cursor.execute('''INSERT INTO Labels(image_id, description) VALUES(?,?)''',
                       (img_id, 'all'))
        label_id = cursor.lastrowid

        volume_label_out = img_out_folder + str(pid) + "_" + str(img_id) + "_MR_T1_" + image_type\
            + "_label_all.nii.gz"
        os.system(DWICONVERT_PATH + " --inputVolume " + volume_label_temp + " -o " +
                  volume_label_out + " --conversionMode NrrdToFSL")
        volume_label_out_db = volume_label_out.replace(util.DATA_FOLDER, "")
        cursor.execute('''UPDATE Labels SET filepath = ? WHERE id = ?''',
                       (volume_label_out_db, label_id))
        os.remove(volume_label_temp)
Example #49
def load_pack(pack_config: PackConfig):
    if pack_config.mods_path is None:
        pack_config.mods_path = os.path.join(pack_config.factorio_path, 'mods')

    locale = FactorioLocale()
    pack_dir = get_pack_dir(pack_config.name)
    oldcwd = os.getcwd()

    try:
        fs = FactorioState(pack_config.factorio_path, locale)

        for fn in os.listdir(pack_config.mods_path):
            fn = os.path.join(pack_config.mods_path, fn)
            if os.path.isdir(fn):
                fs.modlist.add_mod(fn)

        fs.modlist.resolve()
        fs.load_mods()

        locale.merge()
        data = fs.get_data()
    finally:
        os.chdir(oldcwd)

    mkdir_p(pack_dir)

    sort_dict(data, 'technology')

    with open('%s/out' % pack_dir, 'w') as f:
        f.write(json.dumps(data, indent=4))

    fs.save_gfx('%s/icon' % pack_dir)
    locale.save('%s/localedump.cfg' % pack_dir)

    pack_info = PackInfo(pack_config.name, pack_config.title, '', '', fs.modlist._loaded_names)

    with open('%s/info.json' % pack_dir, 'w') as f:
        f.write(pack_info.to_json())
Example #50
def mount_compressed(f):
    """Mount rar file to memory, and return the mounted directory"""

    if not check_support():
        return None # Do nothing

    p = get_shm_path(f)
    if os.path.exists(p): # Already uncompressed
        return p

    util.mkdir_p(p)
    tmp = os.getcwd()
    try:
        os.chdir(p)
        os.system('unrar e %s' % util.escape_path(f))
    except:
        pass
    os.chdir(tmp)

    cmd = 'echo %s >> %s' % (util.escape_path(p), os.path.join(SHM_ROOT, 'history'))
    os.system(cmd)

    return p
Example #51
 def download(self):
     """
     Download from the source URL via HTTP or git
     """
     print('downloading %s' % self.name)
     if self.ext == '.git':
         path = os.path.join(self.workdir, self.name)
         util.rm_rf(self.workdir)
         util.mkdir_p(self.workdir)
         os.chdir(self.workdir)
         subprocess.check_call(shlex.split('git clone %s %s' % (self.url, self.name)))
         self.unpack_name = self.name
         self.filename = self.name
     else:
         path = os.path.join(self.workdir, self.filename)
         if not os.path.exists(path): 
             util.rm_rf(self.workdir)
             util.mkdir_p(self.workdir)
             os.chdir(self.workdir)
             stream = urllib2.urlopen(self.url)
             fd = open(self.filename, 'wb')
             shutil.copyfileobj(stream, fd)
             fd.close()
Example #52
def create_aws_spot_vars(ec2_conf):
    init = yaml.load(open("conf/init.yml"))
    volumes = map(trans_to_volume, ec2_conf["Block_Device_Mapping"])
    # temporary folder for ansible to save info
    ansible_info_dir = tempfile.mkdtemp()

    var = {
        'zone':           ec2_conf["Availability_Zone"],
        'count':          init["MachineNumber"],
        'image':          ec2_conf["AMI"],
        'region':         ec2_conf["Region"],
        'volumes':        volumes,
        'key_name':       ec2_conf["Keypair"],
        'spot_price':     ec2_conf["Spot_Price"],
        'instance_type':  ec2_conf["Instance_Type"],
        'security_group': ec2_conf["Security_Group"],
        'ansible_info_dir': ansible_info_dir,
    }

    var_dir = 'spot/roles/create_ec2/vars'
    mkdir_p(var_dir)
    out = open(os.path.join(var_dir, 'main.yml'), 'w')
    yaml.dump(var, out, default_flow_style=False)
    out.close()
Example #53
def create(args):
    """
    Create a new package.
    """
    base = os.path.split(args.url)[1]
    base = base.split('.')[0]
    base = base.split('-')[0]
    base = base.split('_')[0]
    name = args.name or base

    template = """
import winbrew

class %(name)s(winbrew.Formula):
    url = '%(url)s'
    homepage = ''
    sha1 = ''
    build_deps = ()
    deps = ()

    def install(self):
        pass

    def test(self):
        pass
"""

    path = os.path.join(winbrew.formula_path, '%s.py' % name)
    if not os.path.exists(path):
        util.mkdir_p(os.path.split(path)[0]) 
        fd = open(path, 'w')
        fd.write(template % {'name': name.title(), 'url': args.url})
        fd.close()

    args.name = name
    edit(args)
Example #54
def test_prerequisites():
    '''check we have the right directories and tools to run tests'''
    print("Testing prerequisites")
    util.mkdir_p('buildlogs')
    return True
Example #55
            check_logs(step)
            continue
        results.add(step, '<span class="passed-text">PASSED</span>', time.time() - t1)
        print(">>>> PASSED STEP: %s at %s" % (step, time.asctime()))
        check_logs(step)
    if not passed:
        print("FAILED %u tests: %s" % (len(failed), failed))

    util.pexpect_close_all()

    write_fullresults()

    return passed


util.mkdir_p('buildlogs')

lck = util.lock_file('buildlogs/autotest.lck')
if lck is None:
    print("autotest is locked - exiting")
    sys.exit(0)

atexit.register(util.pexpect_close_all)

if len(args) > 0:
    # allow a wildcard list of steps
    matched = []
    for a in args:
        for s in steps:
            if fnmatch.fnmatch(s.lower(), a.lower()):
                matched.append(s)
Example #56
#!/usr/bin/env python2
# coding: utf-8

'''
Downloads all data files on the legislative progress (tramitação) of bills
from the Câmara Municipal de São Paulo.
'''

import os
from lxml.html import parse
from urllib import urlretrieve
from configura import RAW_PATH
from util import mkdir_p

if '__main__' == __name__:

    url = ('http://www.camara.sp.gov.br/index.php?option=com_content'
           '&view=article&id=10008:detalhes-tramitacao-projetos-dados-abertos'
           '&catid=119')
    root = parse(url).getroot()

    arquivos_de_dados = root.xpath(
        '//p[contains(., "de arquivos")]/following-sibling::p/a/@href')

    for arquivo in arquivos_de_dados:
        path = os.path.join(RAW_PATH, 'tramitacoes')
        mkdir_p(path)
        urlretrieve(arquivo, os.path.join(path, arquivo.split('/').pop()))
Example #57
    params["title"] = title

    resp = requests.get(EN_SONG_SEARCH_URL, params=params)
    resp.raise_for_status()
    data = json.loads(resp.text)
    return bool(data["response"]["status"]["code"] == 0 and len(data["response"]["songs"]))


if __name__ == "__main__":
    import errno
    import os
    import sys
    import shutil
    import util

    util.mkdir_p("xml/existing")
    util.mkdir_p("xml/nonexistant")
    api_key = os.environ["ECHO_NEST_API_KEY"]

    if len(sys.argv) != 2:
        # No argument given, do entire xml/raw directory
        for dirpath, dirnames, filenames in os.walk("xml/raw"):
            for filename in filenames:
                full_path = os.path.join(dirpath, filename)
                print full_path,

                exist_path = os.path.join("xml/existing", filename)
                if os.path.isfile(exist_path):
                    print " o"
                    continue