Example #1
0
def bundle(rootfs, region, bucket=None, size=10, filesystem='ext4'):
    _get_amitools_path()
    pems = _get_pem_paths()

    log.info('creating loopback, formatting and mounting')
    image_path = rootfs + '.img'
    image_mount = rootfs + '.img.mount'
    utils.mkdir(image_mount)
    executil.system('dd if=/dev/null of=%s bs=1 seek=%dG' % (image_path, size))
    executil.system('mkfs.' + filesystem, '-F', '-j', image_path)
    executil.system('mount -o loop', image_path, image_mount)

    log.info('syncing rootfs to loopback')
    utils.rsync(rootfs, image_mount)

    log.debug('umounting loopback')
    executil.system('umount', '-d', image_mount)
    os.removedirs(image_mount)

    log.debug('getting unique ami name')
    app = utils.get_turnkey_version(rootfs)
    ami_name = utils.get_uniquename(region, app + '.s3')
    log.info('target ami_name - %s ', ami_name)

    log.info('bundling loopback into ami')
    arch = utils.parse_imagename(ami_name)['architecture']
    _bundle_image(region, image_path, ami_name, arch, pems)
    os.remove(image_path)

    log.info('uploading bundled ami')
    bucket = bucket if bucket else "turnkeylinux-" + region
    _bundle_upload(region, ami_name, bucket)

    log.info("complete - %s %s", bucket, ami_name)
    return bucket, ami_name
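
A hypothetical invocation of bundle() above; the rootfs path and region are made up for illustration:

bucket, ami_name = bundle('/mnt/turnkey-rootfs', 'us-east-1', size=10)
log.info('uploaded %s to bucket %s', ami_name, bucket)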
Example #2
0
def _install_influxdb():

    influxdb_source_url = ctx.node.properties['influxdb_rpm_source_url']

    influxdb_user = 'influxdb'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()

    utils.copy_notice('influxdb')
    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)

    utils.yum_install(influxdb_source_url)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home))

    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)

    utils.logrotate('influxdb')
    utils.systemd.configure('influxdb')
Example #3
0
def create_cloudify_user():
    utils.create_service_user(
        user=utils.CLOUDIFY_USER,
        group=utils.CLOUDIFY_GROUP,
        home=utils.CLOUDIFY_HOME_DIR
    )
    utils.mkdir(utils.CLOUDIFY_HOME_DIR)
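
The utils.mkdir helper these examples call is not shown; a minimal sketch of the usual pattern (idempotent creation, plus the optional use_sudo flag seen in later examples) might look like this:

import os
import subprocess

def mkdir(path, use_sudo=False):
    # Idempotent: do nothing when the directory already exists.
    if os.path.isdir(path):
        return
    if use_sudo:
        subprocess.check_call(['sudo', 'mkdir', '-p', path])
    else:
        os.makedirs(path)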
Example #4
0
File: method.py Project: kuntzer/sclas
	def save(self, filepath=None):
		if filepath is None:
			filepath = self.workdir
		utils.mkdir(filepath)
		filename = os.path.join(filepath, "%s.pkl" % self.get_name())
		utils.writepickle(self, filename)
		logger.info("Saved method %s to %s" % (self.get_name(), filename))
Example #5
0
def build_src_project(bindings, jamaicaoutput, targetdir, syscalls, interfaceResolver, debug, classrefs):
	"""
	Construct the software portion of the project. Copy the C source code for the Jamaica project, 
	refactoring the functions that are implemented on the FPGA.
	Also copies the FPGA interface and build scripts. 
	
	bindings:
		A map {id -> java method signature} that gives the ID of each hardware method. 
		Generated from prepare_hls_project.build_from_functions
	jamaicaoutput:
		Absolute path of the jamaica builder output directory which contains the source C files
	targetdir:
		Absolute path to place output files
	"""
	if not os.path.isfile(join(jamaicaoutput, "Main__nc.o")):
		raise CaicosError("Cannot find file " + str(join(jamaicaoutput, "Main__nc.o")) + 
						". Ensure that the application has first be been built by Jamaica Builder.")
		
	mkdir(targetdir)
	copy_files(project_path("projectfiles", "juniper_fpga_interface"), join(targetdir, "juniper_fpga_interface"))
	copy_files(project_path("projectfiles", "malloc_preload"), join(targetdir, "malloc_preload"))
	refactor_src(bindings, jamaicaoutput, join(targetdir, "src"), debug)
	if debug:
		copy_files(project_path("debug_software"), join(targetdir, "src"))
	generate_interrupt_handler(join(targetdir, "src", "caicos_interrupts.c"), syscalls, interfaceResolver, classrefs)
	shutil.copy(join(jamaicaoutput, "Main__nc.o"), join(targetdir, "src"))
	shutil.copy(project_path("projectfiles", "include", "juniperoperations.h"), join(targetdir, "src"))
	shutil.copy(project_path("projectfiles", "scripts", "run.sh"), targetdir)
	make_executable([join(targetdir, "run.sh")])
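
A hypothetical call, with every path and value invented for illustration (the interfaceResolver and classrefs arguments depend on the surrounding caicos tooling):

build_src_project(
    bindings={0: 'void com.example.Foo.bar()'},
    jamaicaoutput='/home/dev/jamaica_output',
    targetdir='/home/dev/sw_project',
    syscalls={'juniper_fpga_read': 1},
    interfaceResolver=None,
    debug=False,
    classrefs=[])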
Example #6
0
    def export(self, path, module_name, manifest_name, parameters=None):

        files_path = os.path.join(path, 'files')
        utils.mkdir(files_path)

        templates_path = os.path.join(path, 'templates')
        utils.mkdir(templates_path)

        manifests_path = os.path.join(path, 'manifests')

        app_manifest = os.path.join(manifests_path, manifest_name+'.pp')
        fh = open(app_manifest, "r+")

        offset=0
        for line in fh:
            offset += len(line)
            if line.startswith("class"):
                break

        fh.seek(offset-2)

        if parameters:
            fh.write(self.write_parameters(parameters))

        fh.write("{\n\n")

        for name, file in self.files.iteritems():
            fh.write(file.export(path, module_name))
        fh.write("}")
        fh.close()
Example #7
0
def install_amqpinflux():

    amqpinflux_rpm_source_url = \
        ctx.node.properties['amqpinflux_rpm_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ['INFLUXDB_ENDPOINT_IP']
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()

    amqpinflux_user = 'amqpinflux'
    amqpinflux_group = 'amqpinflux'
    amqpinflux_venv = '{0}/env'.format(AMQPINFLUX_HOME)

    ctx.logger.info('Installing AMQPInflux...')
    utils.set_selinux_permissive()

    utils.copy_notice('amqpinflux')
    utils.mkdir(AMQPINFLUX_HOME)

    utils.yum_install(amqpinflux_rpm_source_url)
    _install_optional(amqpinflux_venv)
    utils.create_service_user(amqpinflux_user, AMQPINFLUX_HOME)
    _deploy_broker_configuration(amqpinflux_group)

    ctx.logger.info('Fixing permissions...')
    utils.chown(amqpinflux_user, amqpinflux_group, AMQPINFLUX_HOME)

    utils.systemd.configure('amqpinflux')
Example #8
0
def configure_manager(manager_config_path,
                      manager_config):
    '''Sets config defaults and creates the config file'''
    _, temp_config = tempfile.mkstemp()
    config = ConfigParser()

    config.add_section('Credentials')
    config.set('Credentials', 'subscription_id',
               manager_config['subscription_id'])
    config.set('Credentials', 'tenant_id',
               manager_config['tenant_id'])
    config.set('Credentials', 'client_id',
               manager_config['client_id'])
    config.set('Credentials', 'client_secret',
               manager_config['client_secret'])

    config.add_section('Azure')
    config.set('Azure', 'location',
               manager_config['location'])

    with open(temp_config, 'w') as temp_config_file:
        config.write(temp_config_file)

    utils.mkdir(os.path.dirname(manager_config_path), use_sudo=True)
    utils.move(temp_config, manager_config_path)

    # Install prerequisites for the azure-storage Python package
    utils.yum_install('gcc', service_name='azure-storage')
    utils.yum_install('python-devel', service_name='azure-storage')
    utils.yum_install('openssl-devel', service_name='azure-storage')
    utils.yum_install('libffi-devel', service_name='azure-storage')
    utils.yum_install('python-cffi', service_name='azure-storage')
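
A hypothetical call; the destination path and all credential values below are placeholders:

configure_manager(
    '/opt/cloudify/azure_config.cfg',
    {
        'subscription_id': '<subscription-id>',
        'tenant_id': '<tenant-id>',
        'client_id': '<client-id>',
        'client_secret': '<client-secret>',
        'location': 'westeurope',
    })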
Example #9
0
def _install_rabbitmq():
    erlang_rpm_source_url = ctx.node.properties['erlang_rpm_source_url']
    rabbitmq_rpm_source_url = ctx.node.properties['rabbitmq_rpm_source_url']
    # TODO: maybe we don't need this env var
    os.putenv('RABBITMQ_FD_LIMIT',
              str(ctx.node.properties['rabbitmq_fd_limit']))
    rabbitmq_log_path = '/var/log/cloudify/rabbitmq'
    rabbitmq_username = ctx.node.properties['rabbitmq_username']
    rabbitmq_password = ctx.node.properties['rabbitmq_password']
    rabbitmq_cert_public = ctx.node.properties['rabbitmq_cert_public']
    rabbitmq_ssl_enabled = ctx.node.properties['rabbitmq_ssl_enabled']
    rabbitmq_cert_private = ctx.node.properties['rabbitmq_cert_private']

    ctx.logger.info('Installing RabbitMQ...')
    utils.set_selinux_permissive()

    utils.copy_notice('rabbitmq')
    utils.mkdir(rabbitmq_log_path)

    utils.yum_install(erlang_rpm_source_url)
    utils.yum_install(rabbitmq_rpm_source_url)

    utils.logrotate('rabbitmq')

    utils.deploy_blueprint_resource(
        '{0}/kill-rabbit'.format(CONFIG_PATH),
        '/usr/local/bin/kill-rabbit')
    utils.chmod('500', '/usr/local/bin/kill-rabbit')

    utils.systemd.configure('rabbitmq')

    ctx.logger.info('Configuring File Descriptors Limit...')
    utils.deploy_blueprint_resource(
        '{0}/rabbitmq_ulimit.conf'.format(CONFIG_PATH),
        '/etc/security/limits.d/rabbitmq.conf')

    utils.systemd.systemctl('daemon-reload')

    utils.chown('rabbitmq', 'rabbitmq', rabbitmq_log_path)

    utils.systemd.start('cloudify-rabbitmq')

    time.sleep(10)
    utils.wait_for_port(5672)

    ctx.logger.info('Enabling RabbitMQ Plugins...')
    # Occasional timing issues with rabbitmq starting have resulted in
    # failures when first trying to enable plugins
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_management'],
               retries=5)
    utils.sudo(['rabbitmq-plugins', 'enable', 'rabbitmq_tracing'], retries=5)

    _clear_guest_permissions_if_guest_exists()
    _create_user_and_set_permissions(rabbitmq_username, rabbitmq_password)
    _set_security(
        rabbitmq_ssl_enabled,
        rabbitmq_cert_private,
        rabbitmq_cert_public)

    utils.systemd.stop('cloudify-rabbitmq', retries=5)
Example #10
0
def _install_influxdb():

    influxdb_source_url = ctx_properties['influxdb_rpm_source_url']

    influxdb_user = 'influxdb'
    influxdb_group = 'influxdb'
    influxdb_home = '/opt/influxdb'
    influxdb_log_path = '/var/log/cloudify/influxdb'

    ctx.logger.info('Installing InfluxDB...')
    utils.set_selinux_permissive()

    utils.copy_notice(INFLUX_SERVICE_NAME)
    utils.mkdir(influxdb_home)
    utils.mkdir(influxdb_log_path)

    utils.yum_install(influxdb_source_url, service_name=INFLUX_SERVICE_NAME)
    utils.sudo(['rm', '-rf', '/etc/init.d/influxdb'])

    ctx.logger.info('Deploying InfluxDB config.toml...')
    utils.deploy_blueprint_resource(
        '{0}/config.toml'.format(CONFIG_PATH),
        '{0}/shared/config.toml'.format(influxdb_home),
        INFLUX_SERVICE_NAME)

    ctx.logger.info('Fixing user permissions...')
    utils.chown(influxdb_user, influxdb_group, influxdb_home)
    utils.chown(influxdb_user, influxdb_group, influxdb_log_path)

    utils.systemd.configure(INFLUX_SERVICE_NAME)
    # Provided with InfluxDB's package. Will be removed if it exists.
    utils.remove('/etc/init.d/influxdb')
    utils.logrotate(INFLUX_SERVICE_NAME)
Example #11
0
def dump_upgrade_data():

    if os.path.exists(DUMP_SUCCESS_FLAG):
        return

    endpoint = _get_es_install_endpoint()
    port = _get_es_install_port()
    storage_endpoint = 'http://{0}:{1}/cloudify_storage'.format(endpoint,
                                                                port)
    types = ['provider_context', 'snapshot']
    ctx.logger.info('Dumping upgrade data: {0}'.format(types))
    type_values = []
    for _type in types:
        res = http_request('{0}/_search?q=_type:{1}&size=10000'
                           .format(storage_endpoint, _type),
                           method='GET')
        if res.code != 200:
            ctx.abort_operation('Failed fetching type {0} from '
                                'cloudify_storage index'.format(_type))

        body = res.read()
        hits = json.loads(body)['hits']['hits']
        for hit in hits:
            type_values.append(hit)

    utils.mkdir(UPGRADE_DUMP_PATH, use_sudo=False)
    with open(DUMP_FILE_PATH, 'w') as f:
        for item in type_values:
            f.write(json.dumps(item) + os.linesep)

    # marker file to indicate dump has succeeded
    with open(DUMP_SUCCESS_FLAG, 'w') as f:
        f.write('success')
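
Because each record is written as one JSON document per line, reading the dump back is a one-liner; a sketch reusing DUMP_FILE_PATH from above:

import json

with open(DUMP_FILE_PATH) as f:
    dumped_items = [json.loads(line) for line in f if line.strip()]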
Example #12
0
def _deploy_security_configuration():
    ctx.logger.info('Deploying REST Security configuration file...')

    # Generating random hash salt and secret key
    security_configuration = {
        'hash_salt': base64.b64encode(os.urandom(32)),
        'secret_key': base64.b64encode(os.urandom(32)),
        'encoding_alphabet': _random_alphanumeric(),
        'encoding_block_size': 24,
        'encoding_min_length': 5
    }

    # Pre-creating paths so permissions fix can work correctly
    # in mgmtworker
    for path in utils.MANAGER_RESOURCES_SNAPSHOT_PATHS:
        utils.mkdir(path)
    utils.chown(
        CLOUDIFY_USER, CLOUDIFY_GROUP,
        utils.MANAGER_RESOURCES_HOME)
    utils.sudo(['ls', '-la', '/opt/manager'])

    current_props = runtime_props['security_configuration']
    current_props.update(security_configuration)
    runtime_props['security_configuration'] = current_props

    fd, path = tempfile.mkstemp()
    os.close(fd)
    with open(path, 'w') as f:
        json.dump(security_configuration, f)
    rest_security_path = join(runtime_props['home_dir'], 'rest-security.conf')
    utils.move(path, rest_security_path)
    utils.chown(CLOUDIFY_USER, CLOUDIFY_GROUP, rest_security_path)
Example #13
0
    def __init__(self, name='clusterer',
                 # model initialization
                 load_weights_from=None, weights_file=None, randomize_weights=False,
                 # network architecture
                 top_layers=3, learnable_layers=3, pooling='maxavg', risk_objective=True,
                 # dropout and learning rates
                 input_dropout=0, dropout=0.0, learning_rate=1e-7):
        assert pooling in ['max', 'avg', 'maxavg']

        self.name = name
        self.path = directories.CLUSTERERS + '/'
        utils.mkdir(self.path)

        self.load_weights_from = load_weights_from
        self.weights_file = weights_file
        self.randomize_weights = randomize_weights
        self.top_layers = top_layers
        self.learnable_layers = learnable_layers
        self.pooling = pooling
        self.risk_objective = risk_objective
        self.input_dropout = input_dropout
        self.dropout = dropout
        self.learning_rate = learning_rate

        self.single_size = 855 if directories.CHINESE else 674
        self.pair_size = 1733 if directories.CHINESE else 1370
        self.static_layers = top_layers - learnable_layers
        if self.static_layers == 0:
            self.anaphoricity_input_size = self.single_size
            self.pair_input_size = self.pair_size
        elif self.static_layers == 1:
            self.anaphoricity_input_size = self.pair_input_size = 1000
        else:
            self.anaphoricity_input_size = self.pair_input_size = 500
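
A hypothetical instantiation, assuming the enclosing class is named Clusterer (the class name is not shown in this snippet):

clusterer = Clusterer(name='maxavg_run', pooling='maxavg',
                      top_layers=3, learnable_layers=3,
                      learning_rate=1e-7)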
Example #14
0
 def save(self):
     show_name = None
     if self.rageid is not None:
         show_name = self.rageid_show_name(self.rageid)
         if show_name is not None:
             show_name = unicode(show_name, 'utf-8')
     if show_name is None:
         manual_name = unikeyboard(self.nzbname, 'Enter show name')
         if manual_name is None:
             log("Tvshow: save: did not recieve a name for the TV-show")
             return
         #show_name = manual_name.decode("utf_8").encode("raw_unicode_escape")
         show_name = unicode(manual_name, 'utf-8').replace('\n','')
     strm_path_show = utils.join(self.strm_path, os.path.join(remove_disallowed_filename_chars(show_name),''))
     # Check if showname folder exist in path, if not create it.
     if not utils.exists(strm_path_show):
         try:
             utils.mkdir(strm_path_show)
         except:
             log("Tvshow: save: failed to create TV-show folder %s" % strm_path_show)
             return
     # Check if tvshow.nfo is there, if not, create it.
     tv_nfo = self.info
     tv_nfo.path(strm_path_show)
     # The Episode name has to be picked up by XBMC
     # regexps
     episode_name = self.check_episode_name(self.nzbname)
     if not self.save_nfo_type == "disabled":
         if self.save_nfo_type == "minimal":
             tv_nfo.mini()
         if not utils.exists(os.path.join(strm_path_show, 'tvshow.nfo')):
             tv_nfo.save_tvshow(show_name)
         # now, save the episodename.nfo
         tv_nfo.save_episode(episode_name)
     strm.StrmFile(strm_path_show, episode_name, self.nzb).save()
Example #15
0
    def _combine_filename(self, names, max_length=60):
        # expect the parameter 'names' be something like this:
        # ['css/foo.css', 'css/jquery/datepicker.css']
        # The combined filename is then going to be
        # "/tmp/foo.datepicker.css"
        first_ext = os.path.splitext(names[0])[-1]
        save_dir = self.handler.application.settings.get('combined_static_dir')
        if save_dir is None:
            save_dir = gettempdir()
        save_dir = os.path.join(save_dir, 'combined')
        mkdir(save_dir)
        combined_name = []
        for name in names:
            name, ext = os.path.splitext(os.path.basename(name))
            if ext != first_ext:
                raise ValueError("Mixed file extensions (%s, %s)" %\
                 (first_ext, ext))
            combined_name.append(name)
        if sum(len(x) for x in combined_name) > max_length:
            combined_name = [x.replace('.min','.m').replace('.pack','.p')
                             for x in combined_name]
            combined_name = [re.sub(r'-[\d\.]+', '', x) for x in combined_name]
            while sum(len(x) for x in combined_name) > max_length:
                try:
                    combined_name = [x[-2] == '.' and x[:-2] or x[:-1]
                                 for x in combined_name]
                except IndexError:
                    break

        combined_name.append(first_ext[1:])
        return os.path.join(save_dir, '.'.join(combined_name))
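
Tracing the method with the inputs from its own comment, and assuming combined_static_dir is unset so files land under the system temp directory:

# ['css/foo.css', 'css/jquery/datepicker.css']
#     -> <tempdir>/combined/foo.datepicker.css  (typically /tmp/combined/... on Linux)
# When the joined name exceeds max_length, names are shortened first:
# '.min' -> '.m', '.pack' -> '.p', then version suffixes like '-1.9.2' are stripped.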
Example #16
0
File: handlers.py Project: daqing15/tiler
    def make_destination(self, fileid):
        root = os.path.join(
            self.application.settings['static_path'],
            'uploads'
        )
        if not os.path.isdir(root):
            os.mkdir(root)
        destination = os.path.join(
            root,
            fileid[:1],
            fileid[1:3],
        )
        # so far, it's the directory
        mkdir(destination)
        # this is the full file path
        destination += '/%s' % fileid[3:]
        content_type = self.redis.get('contenttype:%s' % fileid)
        # complete it with the extension
        if content_type == 'image/png':
            destination += '.png'
        else:
            assert content_type == 'image/jpeg', content_type
            destination += '.jpg'

        return destination
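
A worked example of the sharded layout, with a made-up fileid and a PNG content type:

# fileid = 'abcdef123'  ->  <static_path>/uploads/a/bc/def123.png
#   fileid[:1]  = 'a'       (first shard directory)
#   fileid[1:3] = 'bc'      (second shard directory)
#   fileid[3:]  = 'def123'  (file name; extension chosen from the stored content type)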
Example #17
0
def peek(dat, folder, force=False):
    g = Globals()
    outf = g.default_repo_dir

    mkdir(outf + "peek")
    mkdir(outf + "peek/" + dat)

    print("\nPeeking " + folder + " for " + dat)
    dir = outf + "samples/" + dat + "/" + folder
    print("dir", dir)

    if (not force) and (os.path.exists(outf + 'peek/%s/%s.png' % (dat, folder.replace("/", "_")))):
        print("Already peeked before. Now exit.")
        return

    dataset = dset.ImageFolder(root=dir,
                               transform=transforms.Compose([
                                   transforms.ToTensor(),
                                   transforms.Normalize(
                                       (0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                               ]))
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=96,
                                             shuffle=True, num_workers=2)

    for i, data in enumerate(dataloader, 0):
        img, _ = data
        saveImage(img, outf + 'peek/%s/%s.png' %
                  (dat, folder.replace("/", "_")), nrow=12)
        break
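
A hypothetical usage; with the paths above this writes one 12-column grid of samples to <default_repo_dir>/peek/mnist/true.png:

peek('mnist', 'true', force=True)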
Example #18
0
def install_amqpinflux():

    amqpinflux_rpm_source_url = \
        ctx_properties['amqpinflux_rpm_source_url']

    # injected as an input to the script
    ctx.instance.runtime_properties['influxdb_endpoint_ip'] = \
        os.environ['INFLUXDB_ENDPOINT_IP']
    rabbit_props = utils.ctx_factory.get('rabbitmq')
    ctx.instance.runtime_properties['rabbitmq_endpoint_ip'] = \
        utils.get_rabbitmq_endpoint_ip()
    ctx.instance.runtime_properties['rabbitmq_username'] = \
        rabbit_props.get('rabbitmq_username')
    ctx.instance.runtime_properties['rabbitmq_password'] = \
        rabbit_props.get('rabbitmq_password')
    ctx.instance.runtime_properties['rabbitmq_ssl_enabled'] = True

    amqpinflux_venv = '{0}/env'.format(HOME_DIR)

    ctx.logger.info('Installing AMQPInflux...')
    utils.set_selinux_permissive()

    utils.copy_notice(SERVICE_NAME)
    utils.mkdir(HOME_DIR)

    utils.yum_install(amqpinflux_rpm_source_url,
                      service_name=SERVICE_NAME)
    _install_optional(amqpinflux_venv)

    ctx.logger.info('Configuring AMQPInflux...')
    utils.create_service_user(AMQPINFLUX_USER, AMQPINFLUX_GROUP, HOME_DIR)
    ctx.instance.runtime_properties['broker_cert_path'] = \
        utils.INTERNAL_CERT_PATH
    utils.chown(AMQPINFLUX_USER, AMQPINFLUX_GROUP, HOME_DIR)
    utils.systemd.configure(SERVICE_NAME)
Example #19
0
File: __analysis__.py Project: elaird/supy
 def makeFileList(name) :
     fName = self.inputFilesListFile(name)
     if os.path.exists(fName) and self.useCachedFileLists() : return
     fileNames = eval(self.sampleDict[name].filesCommand)
     assert fileNames, "The command '%s' produced an empty list of files"%self.sampleDict[name].filesCommand
     utils.mkdir(os.path.dirname(fName))
     utils.writePickle(fName, zip(fileNames,map(nEventsFile, fileNames)))
Example #20
0
    def set_settings(self, options, args):
        self.reporter = reporters.XunitReporter(options)
        if options.logsdir not in [sys.stderr, sys.stdout]:
            mkdir(options.logsdir)

        self.options = options
        wanted_testers = None
        for tester in self.testers:
            if tester.name in args:
                wanted_testers = tester.name

        if wanted_testers:
            testers = self.testers
            self.testers = []
            for tester in testers:
                if tester.name in args:
                    self.testers.append(tester)
                    args.remove(tester.name)

        if options.config:
            self._load_config(options)

        self._load_testsuites()

        for tester in self.testers:
            tester.set_settings(options, args, self.reporter)

        if not options.config and options.testsuites:
            self._setup_testsuites()
Example #21
0
def install_logstash():
    """Install logstash as a systemd service."""
    logstash_unit_override = '/etc/systemd/system/logstash.service.d'

    logstash_source_url = ctx_properties['logstash_rpm_source_url']
    logstash_log_path = '/var/log/cloudify/logstash'

    ctx.logger.info('Installing Logstash...')
    utils.set_selinux_permissive()
    utils.copy_notice(LOGSTASH_SERVICE_NAME)

    utils.yum_install(logstash_source_url, service_name=LOGSTASH_SERVICE_NAME)

    install_logstash_output_jdbc_plugin()
    install_postgresql_jdbc_driver()

    utils.mkdir(logstash_log_path)
    utils.chown('logstash', 'logstash', logstash_log_path)

    ctx.logger.debug('Creating systemd unit override...')
    utils.mkdir(logstash_unit_override)
    utils.deploy_blueprint_resource(
        '{0}/restart.conf'.format(CONFIG_PATH),
        '{0}/restart.conf'.format(logstash_unit_override),
        LOGSTASH_SERVICE_NAME)
Example #22
0
    def _link_bundle(self, bundle, undo_log):
        utils.log("Linking bundle '%s'" % bundle)

        for dirpath, dirnames, filenames, relpath in \
                self._walk_bundle(bundle):

            for dirname in dirnames:
                if self._ignore_match(dirname):
                    continue
                src_dirpath = os.path.join(dirpath, dirname)
                dst_dirpath = os.path.join(self.root_path, relpath, dirname)
                if self._is_directory_tracked(src_dirpath):
                    utils.symlink(src_dirpath, dst_dirpath,
                                  dry_run=self.dry_run, undo_log=undo_log)
                else:
                    utils.mkdir(dst_dirpath, dry_run=self.dry_run,
                                undo_log=undo_log)

            for filename in filenames:
                if self._ignore_match(filename):
                    continue
                src_filename = os.path.join(dirpath, filename)
                dst_filename = os.path.join(self.root_path, relpath, filename)
                utils.symlink(src_filename, dst_filename,
                              dry_run=self.dry_run, undo_log=undo_log)
Example #23
0
    def _downLoad_lyricFromHttp(self, lyric):
##        print 'downLoad_lyric'
        cwd = os.getcwd()
        print 'lyric.lrc:',lyric.lrc
        url = lyric.lrc

        p = re.compile(r'lrc/(\w+)/(\w+)/(\S+)')
        m = p.search(url)
        #m = re.search(r"lrc/(\w+)/(\w+)/(\S+)", url)

        lyric_save_path = u'www/'
        lyric_save_path += m.group(0)

        mkdir(os.path.dirname(lyric_save_path))

        lyric_content = ""

        f = urllib2.urlopen(url)

        data = f.read()

        lyric_save_absolute_path = os.path.join(cwd,lyric_save_path)
##        print 'downLoad_lyric lyric_save_absolute_path:',lyric_save_absolute_path

        with open(lyric_save_absolute_path, "wb") as code:
            code.write(data)
            lyric_content += data

        lyric.local_path = lyric_save_path
        lyric.sizes = len(data)

        return lyric_content
Example #24
0
 def _execute(self, cycle_id):
     '''
     drive testkit-lite to execute current test sequences defined by cycle id.
     @type cycle_id: int 
     @param cycle_id: the id of current cycle
     @rtype: None
     @return: None
     '''
     if not self._converter:
         self._converter = convert()
     plan_name = '%s%s%s' % ('cycle_', str(cycle_id), '.xml')
     plan_folder_path = mkdir(PLAN_OUTPUT_WORKSPACE)
     result_output_path = mkdir('%s%s' % (join(RESULT_OUTPUT_WORKSPACE, 'cycle_'), str(cycle_id)))   
     engine_output_path = mkdir(join(result_output_path,'output'))
     result_output_file_path = join(result_output_path,'result.xml')
     destination_xml = self._converter(self._option['plan'], join(plan_folder_path, plan_name), engine_output_path)
     if not destination_xml: raise Exception('failed to parse the original plan file!')
     task = '%s %s %s %s %s %s %s' % (self._engine, '-f', destination_xml, '--comm', 'localhost', '-o', result_output_file_path)
     proc = None
     try:
         proc = Popen(task, shell=True)  # call() returns an exit code, not a process handle, so wait() below needs a Popen
         proc.wait()
     except :
         if proc:
             proc.wait()
             sys.exit(1)
     finally:
         if self._option['upload']:
             uploader.upload(cycle_id, result_output_path)
     return cycle_id, result_output_path
Example #25
0
def save_training_TP_FP_using_voc(evaluation, img_names, in_path, out_folder_name=None, neg_thresh=0.3):
    '''use the voc scores to decide if a patch should be saved as a TP or FP or not
    '''
    assert out_folder_name is not None
    general_path = utils.get_path(neural=True, data_fold=utils.TRAINING, in_or_out=utils.IN, out_folder_name=out_folder_name)
    path_true = general_path+'truepos_from_selective_search/'
    utils.mkdir(path_true)

    path_false = general_path+'falsepos_from_selective_search/'
    utils.mkdir(path_false)

    for img_name in img_names:
        good_detections = defaultdict(list)
        bad_detections = defaultdict(list)
        img = cv2.imread(in_path+img_name, flags=cv2.IMREAD_COLOR)
        if img is None:  # cv2.imread returns None on failure instead of raising
            print 'Cannot open image'
            sys.exit(-1)

        for roof_type in utils.ROOF_TYPES:
            detection_scores = evaluation.detections.best_score_per_detection[img_name][roof_type]
            for detection, score in detection_scores:
                if score > 0.5:
                    #true positive
                    good_detections[roof_type].append(detection)
                if score < neg_thresh:
                    #false positive
                    bad_detections[roof_type].append(detection)
                
        for roof_type in utils.ROOF_TYPES:
            extraction_type = 'good'
            save_training_FP_and_TP_helper(img_name, evaluation, good_detections[roof_type], path_true, general_path, img, roof_type, extraction_type, (0,255,0))               
            extraction_type = 'background'
            save_training_FP_and_TP_helper(img_name, evaluation, bad_detections[roof_type], path_false, general_path, img, roof_type, extraction_type, (0,0,255))               
Example #26
0
 def download(source):
     resource_base_dir = utils.resource_factory.get_resources_dir(
             TEST_SERVICE_NAME)
     resource_path = os.path.join(resource_base_dir, 'tmp-res-name')
     utils.mkdir(resource_base_dir)
     utils.write_to_json_file('port: 8080', resource_path)
     return resource_path
Example #27
0
def downloadAssignments(url, handin=False):
    assignments = getAssignments(url)
    os.mkdir("assignments")
    for k, v in assignments.items():
        dirpath = mkdir("assignments", k)
        for item in v:
            print("Processing %s/%s" % (k, item["Nafn"][0]))
            path = mkdir(dirpath, item["Nafn"][0])
            assignment = getAssignment(item["Nafn"][1])

            # Description
            descr = os.path.join(path, "description")
            os.mkdir(descr)
            jsondump(os.path.join(descr,"description.json"), assignment["description"])
            genHtml(os.path.join(descr, "description.html"), assignment["description"]["description"], assignment["description"]["title"])
            for item in assignment["description"]["files"]:
                save(descr, item["url"])

            if handin:
                if "grade" in assignment:
                    grade = os.path.join(path, "grade")
                    os.mkdir(grade)
                    jsondump(os.path.join(grade, "grade.json"), assignment["grade"])
                    for item in assignment["grade"]["files"]:
                        save(grade, item["url"])

                if "handin" in assignment:
                    handin = os.path.join(path, "handin")
                    os.mkdir(handin)
                    jsondump(os.path.join(handin, "handin.json"), assignment["handin"])

                if "statistics" in assignment:
                    jsondump(os.path.join(path, "stats.json"), assignment["statistics"])
                    if assignment["statistics"]["image"] is not None:
                        save(os.path.join(path, "stats.jpg"), assignment["statistics"]["image"])
Example #28
0
File: log.py Project: andr3wmac/metaTower
def start(log_level = 0):
    global main_pid, log_dir

    # log level
    if ( log_level != 0 ): setLevel(log_level)

    # check for old logs
    if ( os.path.isdir(log_dir) ):
        files = os.listdir(log_dir)
        if ( len(files) > 0 ):

            # check if old logs already exists
            old_log_dir = os.path.join(log_dir, "old")
            if ( os.path.isdir(old_log_dir) ): utils.rmdir(old_log_dir)
            utils.mkdir(old_log_dir)
            
            for f in files:
                path = os.path.join(log_dir, f)            
                if ( os.path.isdir(path) ): continue            
                os.rename(path, os.path.join(old_log_dir, f))
    else:
        utils.mkdir(log_dir)

    # set the main process id so we know where it began.
    main_pid = os.getpid()
Example #29
0
def copy_project_files(
    targetdir, jamaicaoutputdir, fpgapartname, filestobuild, reachable_functions, syscalls, interfaceResolver, classrefs
):
    """
	Prepare an HLS project. Copies all required files from the local 'projectfiles' dir into targetdir
	along with any extra required files.
	Args:
		targetdir: directory to output to
		jamaicaoutputdir: absolute path that contains the output of Jamaica builder
		fpgapartname: string of the fpga part name
		filestobuild: array of absolute file paths and will be added to the HLS tcl script as source files
		reachable_functions: array of FuncDecl nodes that are reachable and require translation
		syscalls: map{string->int} names of function calls that should be translated to PCIe system calls -> ID of call
	"""
    mkdir(targetdir)
    copy_files(project_path("projectfiles", "include"), join(targetdir, "include"), [".h"])
    copy_files(project_path("projectfiles", "src"), join(targetdir, "src"), [".h", ".c"])
    shutil.copy(join(jamaicaoutputdir, "Main__.h"), join(targetdir, "include"))

    for f in filestobuild:
        if (
            not os.path.basename(f) == "fpgaporting.c"
        ):  # We needed fpgaporting to perform reachability analysis, but don't rewrite it
            log().info("Adding source file: " + f)
            if f.endswith(".c"):  # We only parse C files
                targetfile = os.path.join(targetdir, "src", os.path.basename(f))
                rewrite_source_file(f, targetfile, reachable_functions, syscalls, interfaceResolver, classrefs)
Example #30
0
    def copy_single_distro_files(self, d, dirtree, symlink_ok):
        distros = os.path.join(dirtree, "images")
        distro_dir = os.path.join(distros,d.name)
        utils.mkdir(distro_dir)
        kernel = utils.find_kernel(d.kernel) # full path
        initrd = utils.find_initrd(d.initrd) # full path

        if kernel is None:
            raise CX("kernel not found: %(file)s, distro: %(distro)s" % 
                    { "file" : d.kernel, "distro" : d.name })

        if initrd is None:
            raise CX("initrd not found: %(file)s, distro: %(distro)s" % 
                    { "file" : d.initrd, "distro" : d.name })

        # Kernels referenced by remote URL are passed through to koan directly,
        # no need for copying the kernel locally:
        if not utils.file_is_remote(kernel):
            b_kernel = os.path.basename(kernel)
            dst1 = os.path.join(distro_dir, b_kernel)
            utils.linkfile(kernel, dst1, symlink_ok=symlink_ok, 
                    api=self.api, logger=self.logger)

        if not utils.file_is_remote(initrd):
            b_initrd = os.path.basename(initrd)
            dst2 = os.path.join(distro_dir, b_initrd)
            utils.linkfile(initrd, dst2, symlink_ok=symlink_ok, 
                    api=self.api, logger=self.logger)
Example #31
0
from warmup_scheduler import GradualWarmupScheduler

######### Set Seeds ###########
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)

start_epoch = 1
mode = opt.MODEL.MODE
session = opt.MODEL.SESSION
Loss_mode = 'L1'
result_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'results', session)
model_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'models', session)

utils.mkdir(result_dir)
utils.mkdir(model_dir)

train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
save_images = opt.TRAINING.SAVE_IMAGES

######### Model ###########
model_restoration = MIRNet(in_channels=3,
                           out_channels=3,
                           n_feat=64,
                           kernel_size=3,
                           stride=2,
                           n_RRG=3,
                           n_MSRB=2,
                           height=3,
Example #32
0
    # sample
    f_sample = generator(z, training=False)
""" train """
''' init '''
# session
sess = utils.session()
# iteration counter
it_cnt, update_cnt = utils.counter()
# saver
saver = tf.train.Saver(max_to_keep=5)
# summary writer
summary_writer = tf.summary.FileWriter('./summaries/celeba_wgan', sess.graph)
''' initialization '''
ckpt_dir = './checkpoints/celeba_wgan'
utils.mkdir(ckpt_dir + '/')
if not utils.load_checkpoint(ckpt_dir, sess):
    sess.run(tf.global_variables_initializer())
''' train '''
try:
    z_ipt_sample = np.random.normal(size=[100, z_dim])

    batch_epoch = len(data_pool) // (batch_size * n_critic)
    max_it = epoch * batch_epoch
    for it in range(sess.run(it_cnt), max_it):
        sess.run(update_cnt)

        # which epoch
        epoch = it // batch_epoch
        it_epoch = it % batch_epoch + 1
Example #33
0
                    type=str,
                    help='Path to weights')
parser.add_argument('--gpus',
                    default='0',
                    type=str,
                    help='CUDA_VISIBLE_DEVICES')
parser.add_argument('--save_images',
                    action='store_true',
                    help='Save denoised images in result directory')

args = parser.parse_args()

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus

utils.mkdir(args.result_dir)

test_dataset = get_validation_data(args.input_dir)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=16,
                         shuffle=False,
                         num_workers=8,
                         drop_last=False)

model_restoration = DenoiseNet()

utils.load_checkpoint(model_restoration, args.weights)
print("===>Testing using weights: ", args.weights)

model_restoration.cuda()
Example #34
0
from warmup_scheduler import GradualWarmupScheduler

######### Set Seeds ###########
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)

start_epoch = 1
mode = opt.MODEL.MODE
session = opt.MODEL.SESSION

result_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'results', session)
model_dir = os.path.join(opt.TRAINING.SAVE_DIR, mode, 'models', session)

utils.mkdir(result_dir)
utils.mkdir(model_dir)

train_dir = opt.TRAINING.TRAIN_DIR
val_dir = opt.TRAINING.VAL_DIR
save_images = opt.TRAINING.SAVE_IMAGES

######### Model ###########
model_restoration = MIRNet()
model_restoration.cuda()

device_ids = [i for i in range(torch.cuda.device_count())]
if torch.cuda.device_count() > 1:
    print("\n\nLet's use", torch.cuda.device_count(), "GPUs!\n\n")

new_lr = opt.OPTIM.LR_INITIAL
Example #35
0
def setup():
    utils.mkdir(args.data)
    utils.mkdir(args.data + "train/")
    utils.mkdir(args.data + "train_reduced/")
    utils.mkdir(args.data + "test/")
    utils.mkdir(args.data + "test_reduced/")
Example #36
0
parser.add_argument('--num_worlds', type=int, default=1000)
parser.add_argument('--vis_path', type=str, default='data/example_vis/')
parser.add_argument('--save_path', type=str, default='data/example_env/')
parser.add_argument('--dim', type=int, default=10)
parser.add_argument('--mode',
                    type=str,
                    default='local',
                    choices=['local', 'global'])
parser.add_argument('--only_global', type=bool, default=False)
parser.add_argument('--sprite_dim', type=int, default=100)
parser.add_argument('--num_steps', type=int, default=10)
args = parser.parse_args()

print args, '\n'

utils.mkdir(args.vis_path)
utils.mkdir(args.save_path)

if args.mode == 'local':
    from environment.NonUniqueGenerator import NonUniqueGenerator
    gen = NonUniqueGenerator(environment.figure_library.objects,
                             environment.figure_library.unique_instructions,
                             shape=(args.dim, args.dim),
                             num_steps=args.num_steps,
                             only_global=args.only_global)
elif args.mode == 'global':
    from environment.GlobalGenerator import GlobalGenerator
    gen = GlobalGenerator(environment.figure_library.objects,
                          environment.figure_library.unique_instructions,
                          shape=(args.dim, args.dim),
                          num_steps=args.num_steps,
Example #37
0
import utils
import traceback
import numpy as np
import tensorflow as tf
import data_mnist as data
import models_mnist as models
import time
""" param """
epoch = 100
batch_size = 64
lr = 0.0002
z_dim = 500
n_critic = 5
gpu_id = 3
''' data '''
utils.mkdir('./data/mnist/')
data.mnist_download('./data/mnist')
imgs, _, _ = data.mnist_load('./data/mnist')
imgs.shape = imgs.shape + (1, )
data_pool = utils.MemoryData({'img': imgs}, batch_size)
""" graphs """
with tf.device('/gpu:%d' % gpu_id):
    ''' models '''
    generator = models.generator
    discriminator = models.discriminator_wgan_gp
    ''' graph '''
    # inputs
    real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    z = tf.placeholder(tf.float32, shape=[None, z_dim])

    # generate
Example #38
0
def getDat(opt, dataList, outf, mixType="pix", singleFolder=False):
    size = []
    remain = opt.mixSize
    for entry in dataList:
        size.append(int(entry.fraction * opt.mixSize))  # get the correct size
        remain -= size[-1]  # update remain
    size[-1] += remain  # add the rest to the last bucket.
    assert (sum(size) == opt.mixSize)  # sizes should add up to mixSize
    dat = torch.FloatTensor()
    tot = 0
    if singleFolder and os.path.exists(outf + "/mark"):
        print("Already generated before. Now exit.")
        dat = torch.load(outf + "/img.pth")
        return dat
    shutil.rmtree(outf, ignore_errors=True)
    mkdir(outf)

    if mixType == "pix":  # mix of images..

        def giveName(iter):  # 7 digit name.
            ans = str(iter)
            return '0' * (7 - len(ans)) + ans

        subfolder = -1
        subfolderSize = 600
        for entry, s in zip(dataList, size):  # should sample it one by one

            print(entry.data, entry.folder, s)
            opt.dir = g.default_repo_dir + "samples/" + entry.data + "/" + entry.folder

            opt.manualSeed = random.randint(1, 10000)  # fix seed
            random.seed(opt.manualSeed)
            torch.manual_seed(opt.manualSeed)

            # can take some transform defined by preprocess
            dataset = dset.ImageFolder(root=opt.dir, transform=entry.transform)
            dataloader = torch.utils.data.DataLoader(dataset,
                                                     batch_size=96,
                                                     shuffle=True,
                                                     num_workers=2)

            count = 0
            # should contain more image than one need
            assert (len(dataset) >= s)
            avg_img = get_avg(dataloader)

            for i, data in enumerate(dataloader, 0):
                img, _ = data
                for candidate in img:  # add images one by one
                    # The line below is special design for dup>1 case
                    image = candidate if entry.dup == 1 else avg_img
                    for kth in range(0, entry.dup):  # duplicated images...
                        if tot == 0:
                            dat.resize_(
                                opt.mixSize,
                                image.size(0) * image.size(1) * image.size(2))
                        if tot % subfolderSize == 0:
                            subfolder += 1
                            mkdir(outf + "/" + str(subfolder))
                        saveImage(
                            image, outf + "/" + str(subfolder) + "/" +
                            giveName(tot) + ".png")
                        dat[tot].fill_(0)
                        dat[tot] += image.resize_(image.nelement()) * 0.5 + 0.5
                        tot += 1
                        count += 1
                        if count == s:  # done copying
                            break
                    if count == s:  # done copying
                        break
                if count == s:  # done copying
                    break
        peek("Mix", os.path.basename(os.path.normpath(outf)), force=True)

        if singleFolder:
            torch.save(dat, outf + "/img.pth")
            torch.save([], outf + "/mark")

        return dat
    else:
        last = 0
        for entry, s in zip(dataList, size):  # should sample it one by one
            if entry.imageMode == 0:
                # no transformation, read features directly
                featureFile = g.default_feature_dir + entry.data + \
                    "/" + entry.folder + "_" + mixType + ".pth"

                featureM = torch.load(featureFile)

            else:
                # need transformation, no test
                opt.dir = g.default_repo_dir + "samples/" + entry.data + "/" + entry.folder
                dataset = dset.ImageFolder(root=opt.dir,
                                           transform=entry.transform)
                dataloader = torch.utils.data.DataLoader(dataset,
                                                         batch_size=96,
                                                         shuffle=True,
                                                         num_workers=2)

                resnet = getattr(models, 'resnet34')(pretrained=True)
                print('Using resnet34 with pretrained weights.')
                resnet.to(device).eval()
                resnet_feature = nn.Sequential(resnet.conv1, resnet.bn1,
                                               resnet.relu, resnet.maxpool,
                                               resnet.layer1, resnet.layer2,
                                               resnet.layer3, resnet.layer4)
                feature_conv, feature_smax, feature_class = [], [], []
                for img, _ in tqdm(dataloader):
                    input = Variable(img.to(device), volatile=True)
                    fconv = resnet_feature(input)
                    fconv = fconv.mean(3).mean(2).squeeze()
                    flogit = resnet.fc(fconv)
                    fsmax = F.softmax(flogit)
                    feature_conv.append(fconv.data.cpu())
                    feature_class.append(flogit.data.cpu())
                    feature_smax.append(fsmax.data.cpu())
                feature_conv = torch.cat(feature_conv, 0)
                feature_class = torch.cat(feature_class, 0)
                feature_smax = torch.cat(feature_smax, 0)

                if mixType.find('conv') >= 0:
                    featureM = feature_conv
                elif mixType.find('smax') >= 0:
                    featureM = feature_smax
                elif mixType.find('class') >= 0:
                    featureM = feature_class
                else:
                    raise NotImplementedError

            randP = torch.randperm(len(featureM))  # random permutation
            if last == 0:
                dat.resize_(opt.mixSize, featureM.size(1))
            dat[last:last + s].copy_(featureM.index_select(0, randP[:s]))
            last += s

        torch.save(dat, outf + "/feature_" + mixType)
        return dat
Example #39
0
            randP = torch.randperm(len(featureM))  # random permutation
            if last == 0:
                dat.resize_(opt.mixSize, featureM.size(1))
            dat[last:last + s].copy_(featureM.index_select(0, randP[:s]))
            last += s

        torch.save(dat, outf + "/feature_" + mixType)
        return dat


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--mixSize',
                        type=int,
                        default=2000,
                        help='the mix size')
    opt = parser.parse_args()
    mkdir(g.default_repo_dir + "samples/Mix")
    dataList = [
        Ent(
            0.8, 'mnist', 'true',
            transforms.Compose([
                transforms.Resize(64),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ])),
        Ent(0.2, 'celeba', 'true', None)
    ]
    getDat(opt, dataList, g.default_repo_dir + "samples/Mix/M1")
Example #40
0
File: train.py Project: liuf1990/vision
def main(args):
    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    if args.use_deterministic_algorithms:
        torch.backends.cudnn.benchmark = False
        torch.use_deterministic_algorithms(True)
    else:
        torch.backends.cudnn.benchmark = True

    # Data loading code
    print("Loading data")
    traindir = os.path.join(args.data_path, "train")
    valdir = os.path.join(args.data_path, "val")

    print("Loading training data")
    st = time.time()
    cache_path = _get_cache_path(traindir, args)
    transform_train = presets.VideoClassificationPresetTrain(crop_size=(112,
                                                                        112),
                                                             resize_size=(128,
                                                                          171))

    if args.cache_dataset and os.path.exists(cache_path):
        print(f"Loading dataset_train from {cache_path}")
        dataset, _ = torch.load(cache_path)
        dataset.transform = transform_train
    else:
        if args.distributed:
            print(
                "It is recommended to pre-compute the dataset cache on a single-gpu first, as it will be faster"
            )
        dataset = datasets.KineticsWithVideoId(
            args.data_path,
            frames_per_clip=args.clip_len,
            num_classes=args.kinetics_version,
            split="train",
            step_between_clips=1,
            transform=transform_train,
            frame_rate=args.frame_rate,
            extensions=(
                "avi",
                "mp4",
            ),
            output_format="TCHW",
        )
        if args.cache_dataset:
            print(f"Saving dataset_train to {cache_path}")
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset, traindir), cache_path)

    print("Took", time.time() - st)

    print("Loading validation data")
    cache_path = _get_cache_path(valdir, args)

    if args.weights and args.test_only:
        weights = torchvision.models.get_weight(args.weights)
        transform_test = weights.transforms()
    else:
        transform_test = presets.VideoClassificationPresetEval(
            crop_size=(112, 112), resize_size=(128, 171))

    if args.cache_dataset and os.path.exists(cache_path):
        print(f"Loading dataset_test from {cache_path}")
        dataset_test, _ = torch.load(cache_path)
        dataset_test.transform = transform_test
    else:
        if args.distributed:
            print(
                "It is recommended to pre-compute the dataset cache on a single-gpu first, as it will be faster"
            )
        dataset_test = datasets.KineticsWithVideoId(
            args.data_path,
            frames_per_clip=args.clip_len,
            num_classes=args.kinetics_version,
            split="val",
            step_between_clips=1,
            transform=transform_test,
            frame_rate=args.frame_rate,
            extensions=(
                "avi",
                "mp4",
            ),
            output_format="TCHW",
        )
        if args.cache_dataset:
            print(f"Saving dataset_test to {cache_path}")
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset_test, valdir), cache_path)

    print("Creating data loaders")
    train_sampler = RandomClipSampler(dataset.video_clips,
                                      args.clips_per_video)
    test_sampler = UniformClipSampler(dataset_test.video_clips,
                                      args.clips_per_video)
    if args.distributed:
        train_sampler = DistributedSampler(train_sampler)
        test_sampler = DistributedSampler(test_sampler, shuffle=False)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=train_sampler,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=collate_fn,
    )

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=args.batch_size,
        sampler=test_sampler,
        num_workers=args.workers,
        pin_memory=True,
        collate_fn=collate_fn,
    )

    print("Creating model")
    model = torchvision.models.video.__dict__[args.model](weights=args.weights)
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    criterion = nn.CrossEntropyLoss()

    lr = args.lr * args.world_size
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scaler = torch.cuda.amp.GradScaler() if args.amp else None

    # convert scheduler to be per iteration, not per epoch, for warmup that lasts
    # between different epochs
    iters_per_epoch = len(data_loader)
    lr_milestones = [
        iters_per_epoch * (m - args.lr_warmup_epochs)
        for m in args.lr_milestones
    ]
    main_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=lr_milestones, gamma=args.lr_gamma)

    if args.lr_warmup_epochs > 0:
        warmup_iters = iters_per_epoch * args.lr_warmup_epochs
        args.lr_warmup_method = args.lr_warmup_method.lower()
        if args.lr_warmup_method == "linear":
            warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
                optimizer,
                start_factor=args.lr_warmup_decay,
                total_iters=warmup_iters)
        elif args.lr_warmup_method == "constant":
            warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
                optimizer,
                factor=args.lr_warmup_decay,
                total_iters=warmup_iters)
        else:
            raise RuntimeError(
                f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported."
            )

        lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
            optimizer,
            schedulers=[warmup_lr_scheduler, main_lr_scheduler],
            milestones=[warmup_iters])
    else:
        lr_scheduler = main_lr_scheduler

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    if args.resume:
        checkpoint = torch.load(args.resume, map_location="cpu")
        model_without_ddp.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
        args.start_epoch = checkpoint["epoch"] + 1
        if args.amp:
            scaler.load_state_dict(checkpoint["scaler"])

    if args.test_only:
        # We disable the cudnn benchmarking because it can noticeably affect the accuracy
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        evaluate(model, criterion, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader,
                        device, epoch, args.print_freq, scaler)
        evaluate(model, criterion, data_loader_test, device=device)
        if args.output_dir:
            checkpoint = {
                "model": model_without_ddp.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "epoch": epoch,
                "args": args,
            }
            if args.amp:
                checkpoint["scaler"] = scaler.state_dict()
            utils.save_on_master(
                checkpoint, os.path.join(args.output_dir,
                                         f"model_{epoch}.pth"))
            utils.save_on_master(
                checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print(f"Training time {total_time_str}")
Example #41
0
def folder_sampler(opt):
    opt.workers = 2
    opt.imageSize = 64
    opt.batchSize = 600
    opt.outTrueA = 'true/'
    opt.outTrueB = 'true_test/'
    opt.outTrueC = 'true_test2/'
    opt.outf = g.default_repo_dir

    opt = addDataInfo(opt)
    assert (opt.batchSize % 3 == 0)

    print_prop(opt)
    opt.outTrueA = opt.outf + "samples/" + opt.data + "/" + opt.outTrueA
    opt.outTrueB = opt.outf + "samples/" + opt.data + "/" + opt.outTrueB
    opt.outTrueC = opt.outf + "samples/" + opt.data + "/" + opt.outTrueC
    folderList = [opt.outTrueA, opt.outTrueB, opt.outTrueC]

    if (os.path.exists(opt.outTrueC)):
        if (os.path.exists(opt.outTrueC + "/mark")):  # indeed finished
            print("Sampling already finished before. Now pass.")
            for f in folderList:
                saveFeature(f, opt, opt.feature_model)
            return
        else:
            print("Partially finished. Now rerun. ")

    mkdir(opt.outf + "samples")
    mkdir(opt.outf + "samples/" + opt.data)
    mkdir(opt.outTrueA)
    mkdir(opt.outTrueB)
    mkdir(opt.outTrueC)

    opt.manualSeed = random.randint(1, 10000)  # pick a random seed, then fix it for reproducibility
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    dataset, dataloader = getDataSet(opt)

    assert (len(dataset) >= opt.sampleSize * 3)

    def giveName(iter):  # 7 digit name.
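        # zero-pads to 7 digits, e.g. 12 -> "0000012"
        # (equivalent to str(iter).zfill(7))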
        ans = str(iter)
        return '0' * (7 - len(ans)) + ans

    iter = 0
    subfolder = -1
    splits = len(folderList)

    for i, data in enumerate(dataloader, 0):
        img, _ = data
        if i % splits == 0:
            subfolder += 1
        for j in range(0, len(img)):
            curFolder = folderList[j % splits]
            mkdir(curFolder + str(subfolder))
            if iter >= splits * opt.sampleSize:
                break
            saveImage(
                img[j],
                curFolder + str(subfolder) + "/" + giveName(iter) + ".png")
            iter += 1
        if iter >= splits * opt.sampleSize:
            break

    for f in folderList:
        saveFeature(f, opt, opt.feature_model)
        peek(opt.data, os.path.relpath(f, opt.outf + "samples/" + opt.data))

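    # write an empty "mark" sentinel into each folder so a rerun can detect
    # completion (checked near the top of this function)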
    for folder in folderList:
        with open(folder + "/mark", "w") as f:
            f.write("")
Example #42
    def run(self,
            pkgdir,
            mirror,
            mirror_name,
            network_root=None,
            kickstart_file=None,
            rsync_flags=None,
            arch=None,
            breed=None,
            os_version=None):
        self.pkgdir = pkgdir
        self.mirror = mirror
        self.mirror_name = mirror_name
        self.network_root = network_root
        self.kickstart_file = kickstart_file
        self.rsync_flags = rsync_flags
        self.arch = arch
        self.breed = breed
        self.os_version = os_version

        # some fixups for the XMLRPC interface, which does not use "None"
        if self.arch == "": self.arch = None
        if self.mirror == "": self.mirror = None
        if self.mirror_name == "": self.mirror_name = None
        if self.kickstart_file == "": self.kickstart_file = None
        if self.os_version == "": self.os_version = None
        if self.rsync_flags == "": self.rsync_flags = None
        if self.network_root == "": self.network_root = None

        # If no breed was specified on the command line, set it to "vmware" for this module
        if self.breed is None:
            self.breed = "vmware"

        # debug log stuff for testing
        #self.logger.info("self.pkgdir = %s" % str(self.pkgdir))
        #self.logger.info("self.mirror = %s" % str(self.mirror))
        #self.logger.info("self.mirror_name = %s" % str(self.mirror_name))
        #self.logger.info("self.network_root = %s" % str(self.network_root))
        #self.logger.info("self.kickstart_file = %s" % str(self.kickstart_file))
        #self.logger.info("self.rsync_flags = %s" % str(self.rsync_flags))
        #self.logger.info("self.arch = %s" % str(self.arch))
        #self.logger.info("self.breed = %s" % str(self.breed))
        #self.logger.info("self.os_version = %s" % str(self.os_version))

        # both --import and --name are required arguments

        if self.mirror is None:
            utils.die(self.logger, "import failed.  no --path specified")
        if self.mirror_name is None:
            utils.die(self.logger, "import failed.  no --name specified")

        # if --arch is supplied, make sure it is valid

        if self.arch is not None and self.arch != "":
            self.arch = self.arch.lower()
            if self.arch == "x86":
                # be consistent
                self.arch = "i386"
            if self.arch not in self.get_valid_arches():
                utils.die(
                    self.logger, "arch must be one of: %s" %
                    string.join(self.get_valid_arches(), ", "))

        # if we're going to do any copying, set where to put things
        # and then make sure nothing is already there.

        self.path = os.path.normpath("%s/ks_mirror/%s" %
                                     (self.settings.webdir, self.mirror_name))
        if os.path.exists(self.path) and self.arch is None:
            # FIXME : Raise exception even when network_root is given ?
            utils.die(
                self.logger,
                "Something already exists at this import location (%s).  You must specify --arch to avoid potentially overwriting existing files."
                % self.path)

        # import takes a --kickstart for forcing selection that can't be used in all circumstances

        if self.kickstart_file and not self.breed:
            utils.die(
                self.logger,
                "Kickstart file can only be specified when a specific breed is selected"
            )

        if self.os_version and not self.breed:
            utils.die(
                self.logger,
                "OS version can only be specified when a specific breed is selected"
            )

        if self.breed and self.breed.lower() not in self.get_valid_breeds():
            utils.die(self.logger,
                      "Supplied import breed is not supported by this module")

        # if --arch is supplied, make sure the user is not importing a path with a different
        # arch, which would just be silly.

        if self.arch:
            # append the arch path to the name if the arch is not already
            # found in the name.
            for x in self.get_valid_arches():
                if self.path.lower().find(x) != -1:
                    if self.arch != x:
                        utils.die(
                            self.logger,
                            "Architecture found on pathname (%s) does not fit the one given in command line (%s)"
                            % (x, self.arch))
                    break
            else:
                # FIXME : This is very likely removed later at get_proposed_name, and the guessed arch appended again
                self.path += ("-%s" % self.arch)

        # make the output path and mirror content but only if not specifying that a network
        # accessible support location already exists (this is --available-as on the command line)

        if self.network_root is None:
            # we need to mirror (copy) the files

            utils.mkdir(self.path)

            # prevent rsync from creating the directory name twice
            # if we are copying via rsync

            if not self.mirror.endswith("/"):
                self.mirror = "%s/" % self.mirror

            if self.mirror.startswith("http://") or self.mirror.startswith(
                    "ftp://") or self.mirror.startswith("nfs://"):

                # http mirrors are kind of primitive.  rsync is better.
                # that's why this isn't documented in the manpage and we don't support them.
                # TODO: how about adding recursive FTP as an option?

                utils.die(self.logger, "unsupported protocol")

            else:

                # good, we're going to use rsync..
                # we don't use SSH for public mirrors and local files.
                # presence of user@host syntax means use SSH

                spacer = ""
                if not self.mirror.startswith(
                        "rsync://") and not self.mirror.startswith("/"):
                    spacer = ' -e "ssh" '
                rsync_cmd = RSYNC_CMD
                if self.rsync_flags:
                    rsync_cmd = rsync_cmd + " " + self.rsync_flags

                # kick off the rsync now

                utils.run_this(rsync_cmd, (spacer, self.mirror, self.path),
                               self.logger)

        else:

            # rather than mirroring, we're going to assume the path is available
            # over http, ftp, and nfs, perhaps on an external filer.  scanning still requires
            # --mirror is a filesystem path, but --available-as marks the network path

            if not os.path.exists(self.mirror):
                utils.die(self.logger, "path does not exist: %s" % self.mirror)

            # find the filesystem part of the path, after the server bits, as each distro
            # URL needs to be calculated relative to this.

            if not self.network_root.endswith("/"):
                self.network_root = self.network_root + "/"
            self.path = os.path.normpath(self.mirror)
            valid_roots = ["nfs://", "ftp://", "http://"]
            for valid_root in valid_roots:
                if self.network_root.startswith(valid_root):
                    break
            else:
                utils.die(
                    self.logger,
                    "Network root given to --available-as must be nfs://, ftp://, or http://"
                )
            if self.network_root.startswith("nfs://"):
                try:
                    (a, b, rest) = self.network_root.split(":", 3)
                except:
                    utils.die(
                        self.logger,
                        "Network root given to --available-as is missing a colon, please see the manpage example."
                    )

        # now walk the filesystem looking for distributions that match certain patterns

        self.logger.info("adding distros")
        distros_added = []
        # FIXME : search below self.path for isolinux configurations or known directories from TRY_LIST
        os.path.walk(self.path, self.distro_adder, distros_added)

        # find out if we can auto-create any repository records from the install tree

        #if self.network_root is None:
        #    self.logger.info("associating repos")
        #    # FIXME: this automagic is not possible (yet) without mirroring
        #    self.repo_finder(distros_added)

        # find the most appropriate answer files for each profile object

        self.logger.info("associating kickstarts")
        self.kickstart_finder(distros_added)

        # ensure bootloaders are present
        self.api.pxegen.copy_bootloaders()

        return True
Example #43
    def run_temporal(self, checkpoint_dir, vid_dir, frame_ext, out_dir,
                     amplification_factor, fl, fh, fs, n_filter_tap,
                     filter_type):
        """Magnify video with a temporal filter.

        Args:
            checkpoint_dir: checkpoint directory.
            vid_dir: directory containing video frames; frames are processed
                in sorted order.
            frame_ext: file extension of the input frames.
            out_dir: directory to place output frames and resulting video.
            amplification_factor: the amplification factor,
                with 0 being no change.
            fl: low cutoff frequency.
            fh: high cutoff frequency.
            fs: sampling rate of the video.
            n_filter_tap: number of filter tap to use.
            filter_type: Type of filter to use. Can be one of "fir",
                "butter", or "differenceOfIIR". For "differenceOfIIR",
                fl and fh specify the rl and rh coefficients as in Wadhwa et al.
        """

        nyq = fs / 2.0
        if filter_type == 'fir':
            filter_b = firwin(n_filter_tap, [fl, fh], nyq=nyq, pass_zero=False)
            filter_a = []
        elif filter_type == 'butter':
            filter_b, filter_a = butter(n_filter_tap, [fl / nyq, fh / nyq],
                                        btype='bandpass')
            filter_a = filter_a[1:]
        elif filter_type == 'differenceOfIIR':
            # This is a copy of what Neal did. The number of taps is ignored.
            # Treat fl and fh as rl and rh as in Wadhwa's code.
            # Writing out the difference equation in the Fourier domain
            # proves this:
            filter_b = [fh - fl, fl - fh]
            filter_a = [-1.0 * (2.0 - fh - fl), (1.0 - fl) * (1.0 - fh)]
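            # Derivation sketch (added for clarity, not from the original
            # source): the difference of two first-order lowpass IIR filters
            #     y1[n] = rl*x[n] + (1 - rl)*y1[n-1]
            #     y2[n] = rh*x[n] + (1 - rh)*y2[n-1],   y[n] = y2[n] - y1[n]
            # has transfer function
            #     H(z) = (rh - rl)(1 - z^-1)
            #            / ((1 - (1 - rh)z^-1)(1 - (1 - rl)z^-1));
            # expanding the numerator gives filter_b and the denominator
            # (minus its leading 1) gives filter_a, with fl = rl, fh = rh.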
        else:
            raise ValueError('Filter type must be one of '
                             '["fir", "butter", "differenceOfIIR"]; got ' +
                             filter_type)
        head, tail = os.path.split(out_dir)
        tail = tail + '_fl{}_fh{}_fs{}_n{}_{}'.format(fl, fh, fs, n_filter_tap,
                                                      filter_type)
        out_dir = os.path.join(head, tail)
        vid_name = os.path.basename(out_dir)
        # make folder
        mkdir(out_dir)
        vid_frames = sorted(glob(os.path.join(vid_dir, '*.' + frame_ext)))
        first_frame = vid_frames[0]
        im = imread(first_frame)
        image_height, image_width = im.shape
        if not self.is_graph_built:
            self.image_width = image_width
            self.image_height = image_height
            # Figure out image dimension
            self._build_IIR_filtering_graphs()
            ginit_op = tf.global_variables_initializer()
            linit_op = tf.local_variables_initializer()
            self.sess.run([ginit_op, linit_op])

            if self.load(checkpoint_dir):
                print("[*] Load Success")
            else:
                raise RuntimeError('MagNet: Failed to load checkpoint file.')
            self.is_graph_built = True
        try:
            i = int(self.ckpt_name.split('-')[-1])
            print("Iteration number is {:d}".format(i))
            vid_name = vid_name + '_' + str(i)
        except:
            print("Cannot get iteration number")

        if len(filter_a) != 0:
            x_state = []
            y_state = []

            for frame in tqdm(vid_frames, desc='Applying IIR'):
                file_name = os.path.basename(frame)
                frame_no, _ = os.path.splitext(file_name)
                frame_no = int(frame_no)
                in_frames = [
                    load_train_data([frame, frame, frame],
                                    gray_scale=self.n_channels == 1,
                                    is_testing=True)
                ]
                in_frames = np.array(in_frames).astype(np.float32)

                texture_enc, x = self.sess.run(
                    [self.texture_enc, self.shape_rep],
                    feed_dict={
                        self.input_image: in_frames[:, :, :, :3],
                    })
                x_state.insert(0, x)
                # set up initial condition.
                while len(x_state) < len(filter_b):
                    x_state.insert(0, x)
                if len(x_state) > len(filter_b):
                    x_state = x_state[:len(filter_b)]
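                # Direct-form I difference equation:
                #     y[n] = sum_i filter_b[i]*x[n-i] - sum_j filter_a[j]*y[n-1-j]
                # (filter_a holds the denominator coefficients after the leading 1)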
                y = np.zeros_like(x)
                for i in range(len(x_state)):
                    y += x_state[i] * filter_b[i]
                for i in range(len(y_state)):
                    y -= y_state[i] * filter_a[i]
                # update y state
                y_state.insert(0, y)
                if len(y_state) > len(filter_a):
                    y_state = y_state[:len(filter_a)]

                out_amp = self.sess.run(self.output_image,
                                        feed_dict={
                                            self.out_texture_enc:
                                            texture_enc,
                                            self.filtered_enc:
                                            y,
                                            self.ref_shape_enc:
                                            x,
                                            self.amplification_factor:
                                            [amplification_factor]
                                        })

                im_path = os.path.join(out_dir, file_name)
                out_amp = np.squeeze(out_amp)
                out_amp = (127.5 * (out_amp + 1)).astype('uint8')
                cv2.imwrite(im_path,
                            cv2.cvtColor(out_amp, code=cv2.COLOR_RGB2BGR))
        else:
            # This does FIR filtering in the Fourier domain. Equivalent to
            # circular convolution.
            x_state = None
            for i, frame in tqdm(enumerate(vid_frames),
                                 desc='Getting encoding'):
                file_name = os.path.basename(frame)
                in_frames = [
                    load_train_data([frame, frame, frame],
                                    gray_scale=self.n_channels == 1,
                                    is_testing=True)
                ]
                in_frames = np.array(in_frames).astype(np.float32)

                texture_enc, x = self.sess.run(
                    [self.texture_enc, self.shape_rep],
                    feed_dict={
                        self.input_image: in_frames[:, :, :, :3],
                    })
                if x_state is None:
                    x_state = np.zeros(x.shape + (len(vid_frames), ),
                                       dtype='float32')
                x_state[:, :, :, :, i] = x

            filter_fft = np.fft.fft(np.fft.ifftshift(filter_b),
                                    n=x_state.shape[-1])
            # Filtering
            for i in trange(x_state.shape[1], desc="Applying FIR filter"):
                x_fft = np.fft.fft(x_state[:, i, :, :], axis=-1)
                x_fft *= filter_fft[np.newaxis, np.newaxis, np.newaxis, :]
                x_state[:, i, :, :] = np.fft.ifft(x_fft).real  # keep the real part; the imaginary residue is numerical noise

            for i, frame in tqdm(enumerate(vid_frames), desc='Decoding'):
                file_name = os.path.basename(frame)
                frame_no, _ = os.path.splitext(file_name)
                frame_no = int(frame_no)
                in_frames = [
                    load_train_data([frame, frame, frame],
                                    gray_scale=self.n_channels == 1,
                                    is_testing=True)
                ]
                in_frames = np.array(in_frames).astype(np.float32)
                texture_enc, _ = self.sess.run(
                    [self.texture_enc, self.shape_rep],
                    feed_dict={
                        self.input_image: in_frames[:, :, :, :3],
                    })
                out_amp = self.sess.run(self.output_image,
                                        feed_dict={
                                            self.out_texture_enc:
                                            texture_enc,
                                            self.filtered_enc:
                                            x_state[:, :, :, :, i],
                                            self.ref_shape_enc:
                                            x,
                                            self.amplification_factor:
                                            [amplification_factor]
                                        })

                im_path = os.path.join(out_dir, file_name)
                out_amp = np.squeeze(out_amp)
                out_amp = (127.5 * (out_amp + 1)).astype('uint8')
                cv2.imwrite(im_path,
                            cv2.cvtColor(out_amp, code=cv2.COLOR_RGB2BGR))
            del x_state

        # Try to combine it into a video
        call([
            DEFAULT_VIDEO_CONVERTER, '-y', '-f', 'image2', '-r', '30', '-i',
            os.path.join(out_dir, '%06d.png'), '-c:v', 'libx264',
            os.path.join(out_dir, vid_name + '.mp4')
        ])
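As a quick sanity check of the two scipy branches above, the FIR and Butterworth designs can be reproduced standalone; the numbers below are illustrative only, not from the original source:

from scipy.signal import butter, firwin

fs, fl, fh, n_taps = 30.0, 0.5, 2.0, 65  # 30 fps video, 0.5-2 Hz passband
nyq = fs / 2.0
b_fir = firwin(n_taps, [fl, fh], nyq=nyq, pass_zero=False)        # bandpass FIR taps
b_iir, a_iir = butter(2, [fl / nyq, fh / nyq], btype='bandpass')  # 2nd-order Butterworth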
Example #44
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--aspect-ratio-group-factor', default=0, type=int)
    parser.add_argument(
        "--test-only",
        dest="test_only",
        help="Only test the model",
        action="store_true",
    )
    parser.add_argument(
        "--pretrained",
        dest="pretrained",
        help="Use pre-trained models from the modelzoo",
        action="store_true",
    )

    # distributed training parameters
    parser.add_argument('--world-size',
                        default=1,
                        type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist-url',
                        default='env://',
                        help='url used to set up distributed training')

    args = parser.parse_args()

    if args.output_dir:
        utils.mkdir(args.output_dir)

    main(args)
Example #45
    os.environ["PATH"] += ':/home/dingning/anaconda3/envs/vlp/bin'
    parser = argparse.ArgumentParser(description="train")
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("opts", default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    if config.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group("nccl", init_method="env://")
        synchronize()

    config.merge_from_list(args.opts)
    config.freeze()

    save_dir = os.path.join(config.save_dir, 'train')
    mkdir(save_dir)
    logger = setup_logger("train", save_dir, get_rank())
    logger.info("Running with config:\n{}".format(config))

    arguments = {'iteration': 0}
    device = torch.device(config.device)

    bert_config = BertConfig(type_vocab_size=len(config.boundaries) + 2)
    generator = Generator(bert_config)
    generator = generator.to(device)

    optimizer = AdamW(
        params=generator.parameters(),
        lr=config.solver.lr,
        weight_decay=config.solver.weight_decay,
        betas=config.solver.betas
Example #46
         outputDir = inputFile.split('.seq')[0]+'_converted'
 except:
     pass
 outputPNGDir = outputDir+'/png'
 outputNPYDir = outputDir+'/npy'
 if (numpy.isnan(outputPNGFlag)):
     outputPNGFlag = 1
 if (numpy.isnan(scalingFactor)):
     scalingFactor = 1
 if (numpy.isnan(firstFrame)):
     firstFrame = 1
 if (numpy.isnan(lastFrame)):
     lastFrame = 1e10
     
 if (rank==0 and (outputPNGFlag==1 or outputNPYFlag==1)):
     utils.mkdir(outputDir)
     if (outputPNGFlag==1):
         utils.mkdir(outputPNGDir)
     if (outputNPYFlag==1):
         utils.mkdir(outputNPYDir)
         
 inputFileList.append(inputFile)
 outputDirList.append(outputDir)
 outputPNGFlagList.append(outputPNGFlag)
 outputPNGDirList.append(outputPNGDir)
 outputNPYFlagList.append(outputNPYFlag)
 outputNPYDirList.append(outputNPYDir)
 scalingFactorList.append(scalingFactor)
 firstFrameList.append(firstFrame)
 lastFrameList.append(lastFrame)
 darkRefFileList.append(darkRefFile)
Example #47
        "count": 20,
    },
}
TEMPLATE_KCWIKI = {
    'url': "http://api.kcwiki.moe/tweet/20",
}


class Twitter:
    tweets = {}
    inited = {}
    html_tag = re.compile(r'<\w+.*?>|</\w+>')
    image_subdir = 'twitter'


utils.mkdir(os.path.join(CQ_IMAGE_ROOT, Twitter.image_subdir))


class Tweet:
    def __init__(self, id_):
        self.id_ = id_
        self.user = None
        self.date = None
        self.ja = ''
        self.zh = ''
        self.media = []

    def __str__(self):
        if self.user is None or self.date is None:
            error("Stringify `Tweet` without `user` or `date`.")
            raise ValueError()
Example #48
def create_batches(path_to_structured_folder_tree,
                   path_to_results,
                   num_cases_per_batch,
                   min_images,
                   max_images,
                   image_size=DEFAULT_FACE_SIZE,
                   use_grayscale=False):
    # PART 0 - preparations
    utils.mkdir(path_to_results)
    meta = {
        "label_names": [],
        "num_cases_per_batch": num_cases_per_batch,
        "img_size": image_size,
        "num_vis": image_size * image_size
    }  # being used below
    if not use_grayscale:
        meta["num_vis"] *= 3

    # PART 1 - scan directories
    print "Scanning directories..."
    folder_tree_dictionary = folder_tree_to_dictionary(
        path_to_structured_folder_tree)
    person_indexes = {}
    for folder in folder_tree_dictionary.keys():
        if (len(folder_tree_dictionary[folder]) >= min_images):
            level_list(folder_tree_dictionary[folder], max_images)
            # create dictionary of person names and their respective index in meta
            person_indexes[folder] = len(meta["label_names"])
            meta["label_names"].append(normalize_string(folder))
        else:
            del folder_tree_dictionary[folder]

    total_nr_of_images = sum([
        len(folder_tree_dictionary[folder])
        for folder in folder_tree_dictionary
    ])
    print "Total number of images: ", total_nr_of_images
    print "Number of classes: ", len(meta["label_names"])

    # PART 2 - load images
    print "Loading images..."
    data = numpy.empty((meta["num_vis"], total_nr_of_images), dtype="uint8")
    data_index = 0
    labels = []
    filenames = []
    for folder in folder_tree_dictionary:
        for file_name in folder_tree_dictionary[folder]:
            file_path = join(path_to_structured_folder_tree, folder, file_name)
            face = cv2.imread(file_path)
            face = cv2.resize(face, (image_size, image_size))

            data[:, data_index] = prepare_image(face, use_grayscale)
            labels.append(person_indexes[folder])
            filenames.append(os.path.basename(file_name))
            data_index += 1

    # PART 3 - dump metadata
    print "Writing batches.meta"
    # calculate mean of the images
    meta['data_mean'] = numpy.mean(data, 1)
    pickle(meta, join(path_to_results, "batches.meta"))

    # PART 4 - create batches
    random_indexes = range(total_nr_of_images)
    random.shuffle(random_indexes)
    nr_of_batches = int(
        math.ceil(total_nr_of_images / float(num_cases_per_batch)))
    for i in range(nr_of_batches):
        batch_start = i * num_cases_per_batch
        batch_end = min((i + 1) * num_cases_per_batch, total_nr_of_images)
        batch_range = random_indexes[batch_start:batch_end]
        batch = {
            "data": data[:, batch_range],
            "labels": [labels[j] for j in batch_range],
            "filenames": [filenames[j] for j in batch_range],
            "batch_label": "data batch %d of %d" % (i, nr_of_batches)
        }

        print "Writing batch nr %d with %d images" % (i,
                                                      batch_end - batch_start)
        pickle(batch, join(path_to_results, "data_batch_%d" % i))
Example #49
File: prepare.py  Project: ChloeCZY/DNCNN
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--images-dir', type=str, default=r'E:\PythonCode\bishe\baseline2\database\test\test')
    parser.add_argument('--h5-path', type=str, default=r'E:\PythonCode\bishe\baseline2\h5')
    parser.add_argument('--patch-size', type=int, default=64)
    parser.add_argument('--stride', type=int, default=100)
    parser.add_argument('--scale', type=int, default=3)
    parser.add_argument('--function', type=str, default='test')
    parser.add_argument('--eval', action='store_true', default=False)  # pass --eval to run eval(); otherwise train()
    parser.add_argument('--JPEG-factor', type=int, default=40)
    parser.add_argument('--jpg-image-dir', type=str, default=(os.path.join(os.getcwd(),'database')))
    args = parser.parse_args()
    args.jpg_image_dir = os.path.join(args.jpg_image_dir, args.function)
    args.jpg_image_dir = os.path.join(args.jpg_image_dir, ('jpg_image_'+args.function))
    mkdir(args.images_dir)
    mkdir(args.jpg_image_dir)
    action = args.function
    print(args)

    if action == 'train':
        args.h5_path = os.path.join(args.h5_path, 'train_'+str(args.scale)+'.h5')  # input h5
        train(args)
        print('train')
    elif action == 'eval':
        args.h5_path = os.path.join(args.h5_path, 'eval_' + str(args.scale) + '.h5')  # input h5
        eval(args)
        print('eval')
    elif action == 'test':
        test(args)
        print('test')
    else:
Example #50
def main(args):
    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    dataset, num_classes = get_dataset(args.dataset, "train", get_transform(train=True))
    dataset_test, _ = get_dataset(args.dataset, "val", get_transform(train=False))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=args.batch_size,
        sampler=train_sampler, num_workers=args.workers,
        collate_fn=utils.collate_fn, drop_last=True)

    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1,
        sampler=test_sampler, num_workers=args.workers,
        collate_fn=utils.collate_fn)

    model = torchvision.models.segmentation.__dict__[args.model](num_classes=num_classes, aux_loss=args.aux_loss)
    model.to(device)
    if args.distributed:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model.load_state_dict(checkpoint['model'])

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        model_without_ddp = model.module

    if args.test_only:
        confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)
        print(confmat)
        return

    params_to_optimize = [
        {"params": [p for p in model_without_ddp.backbone.parameters() if p.requires_grad]},
        {"params": [p for p in model_without_ddp.classifier.parameters() if p.requires_grad]},
    ]
    if args.aux_loss:
        params = [p for p in model_without_ddp.aux_classifier.parameters() if p.requires_grad]
        params_to_optimize.append({"params": params, "lr": args.lr * 10})
    optimizer = torch.optim.SGD(
        params_to_optimize,
        lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lambda x: (1 - x / (len(data_loader) * args.epochs)) ** 0.9)
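    # poly LR decay: factor = (1 - x / total_iterations) ** 0.9, where x is the
    # scheduler step count (stepped once per batch inside train_one_epoch)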

    start_time = time.time()
    for epoch in range(args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, args.print_freq)
        confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes)
        print(confmat)
        utils.save_on_master(
            {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
                'args': args
            },
            os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
Example #51
File: templar.py  Project: whitekid/cobbler
    def render(self,
               data_input,
               search_table,
               out_path,
               subject=None,
               template_type=None):
        """
        Render data_input back into a file.
        data_input is either a string or a filename
        search_table is a hash of metadata keys and values
        out_path if not-none writes the results to a file
        (though results are always returned)
        subject is a profile or system object, if available (for snippet eval)
        """

        if not isinstance(data_input, basestring):
            raw_data = data_input.read()
        else:
            raw_data = data_input
        lines = raw_data.split('\n')

        if not template_type:
            # Assume we're using the default template type, if set in
            # the settings file, or fall back to cheetah as a last resort
            if self.settings and self.settings.default_template_type:
                template_type = self.settings.default_template_type
            else:
                template_type = "cheetah"

        if len(lines) > 0 and lines[0].find("#template=") == 0:
            # pull the template type out of the first line and then drop
            # it and rejoin them to pass to the template language
            template_type = lines[0].split("=")[1].strip().lower()
            del lines[0]
            raw_data = string.join(lines, "\n")

        if template_type == "cheetah":
            data_out = self.render_cheetah(raw_data, search_table, subject)
        elif template_type == "jinja2":
            if jinja2_available:
                data_out = self.render_jinja2(raw_data, search_table, subject)
            else:
                return "# ERROR: JINJA2 NOT AVAILABLE. Maybe you need to install python-jinja2?\n"
        else:
            return "# ERROR: UNSUPPORTED TEMPLATE TYPE (%s)" % str(
                template_type)

        # now apply some magic post-filtering that is used by cobbler import and some
        # other places.  Forcing folks to double escape things would be very unwelcome.
        hp = search_table.get("http_port", "80")
        server = search_table.get("server", "server.example.org")
        if hp not in (80, '80'):
            repstr = "%s:%s" % (server, hp)
        else:
            repstr = server
        search_table["http_server"] = repstr

        for x in search_table.keys():
            if type(x) == str:
                data_out = data_out.replace("@@%s@@" % str(x),
                                            str(search_table[str(x)]))

        # remove leading newlines, which apparently break AutoYaST?
        if data_out.startswith("\n"):
            data_out = data_out.lstrip()

        # if requested, write the data out to a file
        if out_path is not None:
            utils.mkdir(os.path.dirname(out_path))
            fd = open(out_path, "w+")
            fd.write(data_out)
            fd.close()

        return data_out
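A minimal usage sketch of the @@var@@ post-filter described above (the Templar constructor argument is a guess, Cheetah must be installed, and the template string is illustrative, not from the Cobbler source):

templar = Templar(None)  # hypothetical: real callers pass a config object
table = {"server": "cobbler.example.org", "http_port": "8080"}
out = templar.render("url=http://@@http_server@@/repo\n", table, None)
# -> "url=http://cobbler.example.org:8080/repo\n"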
Example #52
def main(args):
    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)

    device = torch.device(args.device)

    # Data loading code
    print("Loading data")

    dataset, num_classes = get_dataset(
        args.dataset, "train", get_transform(True, args.data_augmentation),
        args.data_path)
    dataset_test, _ = get_dataset(args.dataset, "val",
                                  get_transform(False, args.data_augmentation),
                                  args.data_path)

    print("Creating data loaders")
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset)
        test_sampler = torch.utils.data.distributed.DistributedSampler(
            dataset_test)
    else:
        train_sampler = torch.utils.data.RandomSampler(dataset)
        test_sampler = torch.utils.data.SequentialSampler(dataset_test)

    if args.aspect_ratio_group_factor >= 0:
        group_ids = create_aspect_ratio_groups(
            dataset, k=args.aspect_ratio_group_factor)
        train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids,
                                                  args.batch_size)
    else:
        train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,
                                                            args.batch_size,
                                                            drop_last=True)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_sampler=train_batch_sampler,
        num_workers=args.workers,
        collate_fn=utils.collate_fn)

    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=1,
                                                   sampler=test_sampler,
                                                   num_workers=args.workers,
                                                   collate_fn=utils.collate_fn)

    print("Creating model")
    kwargs = {"trainable_backbone_layers": args.trainable_backbone_layers}
    if "rcnn" in args.model:
        if args.rpn_score_thresh is not None:
            kwargs["rpn_score_thresh"] = args.rpn_score_thresh
    model = torchvision.models.detection.__dict__[args.model](
        num_classes=num_classes, pretrained=args.pretrained, **kwargs)
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    args.lr_scheduler = args.lr_scheduler.lower()
    if args.lr_scheduler == "multisteplr":
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=args.lr_steps, gamma=args.lr_gamma)
    elif args.lr_scheduler == "cosineannealinglr":
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer, T_max=args.epochs)
    else:
        raise RuntimeError(
            "Invalid lr scheduler '{}'. Only MultiStepLR and CosineAnnealingLR "
            "are supported.".format(args.lr_scheduler))

    if args.resume:
        checkpoint = torch.load(args.resume, map_location="cpu")
        model_without_ddp.load_state_dict(checkpoint["model"])
        optimizer.load_state_dict(checkpoint["optimizer"])
        lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
        args.start_epoch = checkpoint["epoch"] + 1

    if args.test_only:
        evaluate(model, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, optimizer, data_loader, device, epoch,
                        args.print_freq)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint = {
                "model": model_without_ddp.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "args": args,
                "epoch": epoch,
            }
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_dir, "model_{}.pth".format(epoch)))
            utils.save_on_master(
                checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))

        # evaluate after every epoch
        evaluate(model, data_loader_test, device=device)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print("Training time {}".format(total_time_str))


if __name__ == '__main__':

    #General parameters
    n_critic = 1  #How many iterations we train the critic for (in the VDB network we update both D and G once)
    gpu_id = 0  #Which GPU to use when you have multiple GPUs
    epoch = 200
    batch_size = 32
    lr = 0.0001
    z_dim = 100  #Dimensions of the random noise vector

    #Sampling MNIST data
    utils.mkdir('./data/mnist/')
    data.mnist_download('./data/mnist')
    imgs, _, _ = data.mnist_load('./data/mnist')
    imgs.shape = imgs.shape + (1, )
    data_pool = utils.MemoryData({'img': imgs}, batch_size)

    #Variational Information Bottleneck and training-related parameters
    bottle_dim = 512  #dimensions of the bottleneck layer
    I_c = 0.5  #This is the information constraint (Equation (2))
    Alpha = 1e-6  #This controls the beta update in dual gradient descent

    # invoke the main function of the script
    main(epoch=epoch,batch_size=batch_size,lr=lr,z_dim=z_dim,bottle_dim=bottle_dim,i_c=I_c\
        ,alpha=Alpha,n_critic=n_critic,gpu_id=gpu_id,data_pool=data_pool)
def main(epoch, batch_size, lr, z_dim, bottle_dim, i_c, alpha, n_critic,
         gpu_id, data_pool):

    with tf.device('/gpu:%d' % gpu_id):  #Placing the ops under devices

        generator = models.generator  #Generator Object
        discriminator = models.discriminator_wgan_gp  #Discriminator Object

        # inputs Placeholders
        real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
        z = tf.placeholder(tf.float32, shape=[None, z_dim])

        # generate fake data with the generator
        fake = generator(z, reuse=False)

        # Obtaining scores, means and stds for real and fake data from the discriminator
        r_logit, r_mus, r_sigmas = discriminator(real,
                                                 reuse=False,
                                                 gen_train=False,
                                                 bottleneck_dim=bottle_dim)
        f_logit, f_mus, f_sigmas = discriminator(fake,
                                                 gen_train=False,
                                                 bottleneck_dim=bottle_dim)

        #Obtaining the Wasserstein loss and gradient penalty to train the discriminator
        wasserstein_d = losses.wgan_loss(r_logit, f_logit)
        gp = losses.gradient_penalty(real, fake, discriminator)

        #We obtain the bottleneck loss in the discriminator.
        #Inputs to this function are the bottleneck-layer mus and stds for both real and fake data.
        #i_c is the information constraint (upper bound); it is an important parameter.
        bottleneck_loss=losses._bottleneck_loss(real_mus=r_mus, fake_mus=f_mus,\
            real_sigmas=r_sigmas,fake_sigmas=f_sigmas,i_c=i_c)

        #beta is used in the Lagrangian multiplier optimization; this parameter also gets updated adaptively.
        #To read more about dual gradient descent in deep learning, see https://medium.com/@jonathan_hui/rl-dual-gradient-descent-fac524c1f049
        #Initialize with zero

        beta = tf.Variable(tf.zeros([]), name="beta")

        #Combine both losses (10 is the default hyperparameter given by the paper
        # - https://arxiv.org/pdf/1704.00028.pdf )
        d_loss = -wasserstein_d + gp * 10.0 + beta * bottleneck_loss

        #As noted, beta should also be updated adaptively. Here we update it with the following function.
        #Please refer to the VDB paper's equation (9) to understand more about the update
        beta_new = tf.maximum(0.0, beta + alpha * bottleneck_loss)
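        #The max(0, ...) projection keeps the Lagrange multiplier non-negative,
        #as required for the inequality constraint E[KL] <= i_c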

        #This is the main difference from the PyTorch implementation. In TensorFlow we have a static graph, so
        #to update beta with the above-mentioned function we have to use tf.assign()
        assign_op = tf.assign(beta, beta_new)  #beta.assign(beta_new)

        #This is the generator loss.
        #As described in the paper, the generator has a simple loss which uses the mean scores of
        #the generated samples
        f_logit_gen, f_mus_gen, f_sigmas_gen = discriminator(
            fake, gen_train=True, bottleneck_dim=bottle_dim)
        g_loss = -tf.reduce_mean(f_logit_gen)

        #Assigning two optimizers to train both Generator and the Discriminator
        d_var = tf.trainable_variables('discriminator')
        g_var = tf.trainable_variables('generator')

        d_step = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(
            d_loss, var_list=d_var)
        g_step = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(
            g_loss, var_list=g_var)

    # TensorBoard summaries for plotting losses
    wd = wasserstein_d
    d_summary = utils.summary({wd: 'wd', gp: 'gp'})
    g_summary = utils.summary({g_loss: 'g_loss'})
    beta_summary = utils.b_summary(beta)
    #beta_summary = utils.summary({beta: 'beta'})

    #sess= tf.Session()

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    with tf.Session(config=config) as sess:
        # iteration counter
        it_cnt, update_cnt = utils.counter()
        # saver
        saver = tf.train.Saver(
            max_to_keep=5
        )  #Used to save both generator and discriminator parameters
        # summary writer
        summary_writer = tf.summary.FileWriter('./summaries/mnist_wgan_gp',
                                               sess.graph)
        ''' Checking for previously trained checkpoints '''
        ckpt_dir = './checkpoints/mnist_wgan_gp'
        utils.mkdir(ckpt_dir + '/')
        if not utils.load_checkpoint(ckpt_dir, sess):
            sess.run(tf.global_variables_initializer())

        #Starting the training loop
        batch_epoch = len(data_pool) // (batch_size * n_critic)
        max_it = epoch * batch_epoch
        for it in range(sess.run(it_cnt), max_it):
            sess.run(update_cnt)

            # which epoch
            epoch = it // batch_epoch
            it_epoch = it % batch_epoch + 1

            # train D
            for i in range(
                    n_critic
            ):  #First we train the discriminator for a few iterations (here I used only 1)
                # batch data
                real_ipt = data_pool.batch('img')  #Read data batch
                z_ipt = np.random.normal(size=[batch_size,
                                               z_dim])  #Sample noise input

                d_summary_opt, _ = sess.run([d_summary, d_step],
                                            feed_dict={
                                                real: real_ipt,
                                                z: z_ipt
                                            })  #Discriminator Gradient Update
                beta_summary_opt = sess.run(beta_summary)
                #_ = sess.run([d_step], feed_dict={real: real_ipt, z: z_ipt})
                sess.run([assign_op], feed_dict={
                    real: real_ipt,
                    z: z_ipt
                })  #Adaptively update the beta parameter

            summary_writer.add_summary(d_summary_opt, it)
            summary_writer.add_summary(beta_summary_opt, it)

            # train the generator (here we have a simple generator as in a normal WGAN)
            z_ipt = np.random.normal(size=[batch_size, z_dim])
            g_summary_opt, _ = sess.run([g_summary, g_step],
                                        feed_dict={z: z_ipt})
            #_ = sess.run([g_step], feed_dict={z: z_ipt})
            summary_writer.add_summary(g_summary_opt, it)

            # display training progress
            if it % 100 == 0:
                print("Epoch: (%3d) (%5d/%5d)" %
                      (epoch, it_epoch, batch_epoch))

            # save a checkpoint every 1000 iterations
            if (it + 1) % 1000 == 0:
                save_path = saver.save(
                    sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                    (ckpt_dir, epoch, it_epoch, batch_epoch))
                print('Model saved in file: % s' % save_path)

            #This is to save image generations during training as tiles
            if (it + 1) % 100 == 0:
                z_input_sample = np.random.normal(size=[100,
                                                        z_dim])  #Noise samples
                f_sample = generator(z)
                f_sample_opt = sess.run(f_sample,
                                        feed_dict={z: z_input_sample})

                save_dir = './sample_images_while_training/mnist_wgan_gp'
                utils.mkdir(save_dir + '/')
                utils.imwrite(
                    utils.immerge(f_sample_opt, 10,
                                  10), '%s/Epoch_(%d)_(%dof%d).jpg' %
                    (save_dir, epoch, it_epoch, batch_epoch))
Example #55
def main():
    opts = get_argparser().parse_args()
    if opts.dataset.lower() == 'voc':
        opts.num_classes = 21
    elif opts.dataset.lower() == 'cityscapes':
        opts.num_classes = 19

    # Setup visualization
    vis = Visualizer(port=opts.vis_port,
                     env=opts.vis_env) if opts.enable_vis else None
    if vis is not None:  # display options
        vis.vis_table("Options", vars(opts))

    os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Device: %s" % device)

    # Setup random seed
    torch.manual_seed(opts.random_seed)
    np.random.seed(opts.random_seed)
    random.seed(opts.random_seed)

    # Setup dataloader
    if opts.dataset=='voc' and not opts.crop_val:
        opts.val_batch_size = 1
    
    train_dst, val_dst = get_dataset(opts)
    train_loader = data.DataLoader(
        train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=2)
    val_loader = data.DataLoader(
        val_dst, batch_size=opts.val_batch_size, shuffle=True, num_workers=2)
    print("Dataset: %s, Train set: %d, Val set: %d" %
          (opts.dataset, len(train_dst), len(val_dst)))

    # Set up model
    model_map = {
        'deeplabv3_resnet50': network.deeplabv3_resnet50,
        'deeplabv3plus_resnet50': network.deeplabv3plus_resnet50,
        'deeplabv3_resnet101': network.deeplabv3_resnet101,
        'deeplabv3plus_resnet101': network.deeplabv3plus_resnet101,
        'deeplabv3_mobilenet': network.deeplabv3_mobilenet,
        'deeplabv3plus_mobilenet': network.deeplabv3plus_mobilenet
    }

    model = model_map[opts.model](num_classes=opts.num_classes, output_stride=opts.output_stride)
    if opts.separable_conv and 'plus' in opts.model:
        network.convert_to_separable_conv(model.classifier)
    utils.set_bn_momentum(model.backbone, momentum=0.01)
    
    # Set up metrics
    metrics = StreamSegMetrics(opts.num_classes)

    # Set up optimizer
    optimizer = torch.optim.SGD(params=[
        {'params': model.backbone.parameters(), 'lr': 0.1*opts.lr},
        {'params': model.classifier.parameters(), 'lr': opts.lr},
    ], lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
    #optimizer = torch.optim.SGD(params=model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)
    #torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.lr_decay_step, gamma=opts.lr_decay_factor)
    if opts.lr_policy=='poly':
        scheduler = utils.PolyLR(optimizer, opts.total_itrs, power=0.9)
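        # assumption: utils.PolyLR implements the standard poly schedule
        #     lr = base_lr * (1 - cur_itrs / total_itrs) ** power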
    elif opts.lr_policy=='step':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)

    # Set up criterion
    #criterion = utils.get_loss(opts.loss_type)
    if opts.loss_type == 'focal_loss':
        criterion = utils.FocalLoss(ignore_index=255, size_average=True)
    elif opts.loss_type == 'cross_entropy':
        criterion = nn.CrossEntropyLoss(ignore_index=255, reduction='mean')

    def save_ckpt(path):
        """ save current model
        """
        torch.save({
            "cur_itrs": cur_itrs,
            "model_state": model.module.state_dict(),
            "optimizer_state": optimizer.state_dict(),
            "scheduler_state": scheduler.state_dict(),
            "best_score": best_score,
        }, path)
        print("Model saved as %s" % path)
    
    utils.mkdir('checkpoints')
    # Restore
    best_score = 0.0
    cur_itrs = 0
    cur_epochs = 0
    if opts.ckpt is not None and os.path.isfile(opts.ckpt):
        # https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan
        checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))
        model.load_state_dict(checkpoint["model_state"])
        model = nn.DataParallel(model)
        model.to(device)
        if opts.continue_training:
            optimizer.load_state_dict(checkpoint["optimizer_state"])
            scheduler.load_state_dict(checkpoint["scheduler_state"])
            cur_itrs = checkpoint["cur_itrs"]
            best_score = checkpoint['best_score']
            print("Training state restored from %s" % opts.ckpt)
        print("Model restored from %s" % opts.ckpt)
        del checkpoint  # free memory
    else:
        print("[!] Retrain")
        model = nn.DataParallel(model)
        model.to(device)

    #==========   Train Loop   ==========#
    vis_sample_id = np.random.randint(0, len(val_loader), opts.vis_num_samples,
                                      np.int32) if opts.enable_vis else None  # sample idxs for visualization
    denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])  # denormalization for ori images

    if opts.test_only:
        model.eval()
        val_score, ret_samples = validate(
            opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
        print(metrics.to_str(val_score))
        return

    interval_loss = 0
    while True: #cur_itrs < opts.total_itrs:
        # =====  Train  =====
        model.train()
        cur_epochs += 1
        for (images, labels) in train_loader:
            #print(labels[0,:,:])
            #labels = labels.long()
            #bs, _, h, w = labels.size()
            #nc = 14 
            #input_label = torch.FloatTensor(bs,nc,h,w).zero_()
            #semantics = input_label.scatter_(1, labels, 1.0)
            
            #print(images.shape, semantics.shape)
            cur_itrs += 1

            images = images.to(device, dtype=torch.float32)
            labels = labels.to(device, dtype=torch.long)

            optimizer.zero_grad()
            outputs = model(images)
     
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            np_loss = loss.detach().cpu().numpy()
            interval_loss += np_loss
            if vis is not None:
                vis.vis_scalar('Loss', cur_itrs, np_loss)

            if (cur_itrs) % 10 == 0:
                interval_loss = interval_loss/10
                print("Epoch %d, Itrs %d/%d, Loss=%f" %
                      (cur_epochs, cur_itrs, opts.total_itrs, interval_loss))
                interval_loss = 0.0

            if (cur_itrs) % opts.val_interval == 0:
                save_ckpt('checkpoints/latest_%s_%s_os%d.pth' %
                          (opts.model, opts.dataset, opts.output_stride))
                print("validation...")
                model.eval()
                val_score, ret_samples = validate(
                    opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)
                print(metrics.to_str(val_score))
                if val_score['Mean IoU'] > best_score:  # save best model
                    best_score = val_score['Mean IoU']
                    save_ckpt('checkpoints/best_%s_%s_os%d.pth' %
                              (opts.model, opts.dataset,opts.output_stride))

                if vis is not None:  # visualize validation score and samples
                    vis.vis_scalar("[Val] Overall Acc", cur_itrs, val_score['Overall Acc'])
                    vis.vis_scalar("[Val] Mean IoU", cur_itrs, val_score['Mean IoU'])
                    vis.vis_table("[Val] Class IoU", val_score['Class IoU'])

                    for k, (img, target, lbl) in enumerate(ret_samples):
                        img = (denorm(img) * 255).astype(np.uint8)
                        target = train_dst.decode_target(target).transpose(2, 0, 1).astype(np.uint8)
                        lbl = train_dst.decode_target(lbl).transpose(2, 0, 1).astype(np.uint8)
                        concat_img = np.concatenate((img, target, lbl), axis=2)  # concat along width
                        vis.vis_image('Sample %d' % k, concat_img)
                model.train()
            scheduler.step()  

            if cur_itrs >=  opts.total_itrs:
                return
Example #56
        # currently mnist is not supported!
        # assert args.dataset != 'mnist'

        # ensemble_experiment = "exp_2019-04-23_18-08-48/"
        # ensemble_experiment = "exp_2019-04-24_02-20-26"

        ensemble_experiment = args.load_models.split('/')
        if len(ensemble_experiment) > 1:
            # both the path and the name of the experiment were specified
            ensemble_dir = args.load_models
        else:
            # otherwise prepend the per-dataset models directory
            ensemble_root_dir = "{}/{}_models/".format(args.baseroot, args.dataset.lower())
            ensemble_dir = ensemble_root_dir + args.load_models

        utils.mkdir(ensemble_dir)
        # checkpoint_type = 'final'  # which checkpoint to use for ensembling (either 'best' or 'final')

        if args.dataset == 'mnist':
            train_loader, test_loader = get_dataloader(args)
        elif args.dataset.lower() == 'cifar10':
            args.cifar_init_lr = config['optimizer_learning_rate']
            if args.second_model_name is not None:
                assert second_config is not None
                assert args.cifar_init_lr == second_config['optimizer_learning_rate']
                # the dataloader construction below is unaffected by this check
            print('loading {} dataloaders'.format(args.dataset.lower()))
            train_loader, test_loader = cifar_train.get_dataset(config)

        models = []
        accuracies = []
Example #57
import argparse
import os
from multiprocessing import Pool

from tqdm import tqdm

# mkdir and process are assumed to come from the project's own modules.

if __name__ == '__main__':
    # argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', default='data/shape3-demo')
    parser.add_argument('--workers', default=32, type=int)

    # arguments
    args = parser.parse_args()
    print('==> arguments parsed')
    for key in vars(args):
        print('[{0}] = {1}'.format(key, getattr(args, key)))

    # data path
    data_path = args.data_path
    mkdir(data_path, clean=True)

    # tasks
    num_demo = int(1e3)

    tasks = []
    with open(os.path.join(data_path, 'demo.txt'), 'w') as fp:
        for k in range(num_demo):
            print(k + 1, file=fp)
            tasks.append(k + 1)

    # process
    pool = Pool(processes=args.workers)
    with tqdm(total=len(tasks)) as progress:
        for _ in pool.imap_unordered(process, tasks):
            progress.update()
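The pool maps a module-level process function over the task ids, but that function is not part of this snippet. A minimal sketch, assuming each task prepares its own output subdirectory (the actual demo generation is project-specific and left as a placeholder):

def process(task_id):
    # Hypothetical worker: one output folder per demo id.
    out_dir = os.path.join(data_path, '{:04d}'.format(task_id))
    mkdir(out_dir, clean=True)
    # ... generate the shape demo into out_dir here ...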
Example #58
    def buildAnalysisDirs(self,
                          top_dir=None,
                          dry_run=False,
                          link_type="relative",
                          naming_scheme="partial"):
        """Construct and populate analysis directories for the experiments

        For each defined experiment, create the required analysis directories
        and populate with links to the primary data files.

        Arguments:
          top_dir: if set then create the analysis directories as
            subdirs of the specified directory; otherwise operate in cwd
          dry_run: if True then only report the mkdir, ln etc operations that
            would be performed. Default is False (do perform the operations).
          link_type: type of link to use when linking to primary data, one of
            'relative' or 'absolute'.
          naming_scheme: naming scheme to use for links to primary data, one of
            'full' (same names as primary data files), 'partial' (cut-down version
            of the full name which excludes sample names - the default), or
            'minimal' (just the library name).
        """
        # Deal with top_dir
        if top_dir:
            if os.path.exists(top_dir):
                print("Directory %s already exists" % top_dir)
            else:
                if not dry_run:
                    # Create top directory
                    print("Creating %s" % top_dir)
                    utils.mkdir(top_dir, mode=0o775)
                else:
                    # Report what would have been done
                    print("mkdir %s" % top_dir)
        # Type of link
        if link_type == 'absolute':
            use_relative_links = False
        else:
            use_relative_links = True
        # For each experiment, make and populate directory
        for expt in self.experiments:
            print("Experiment: %s %s %s/%s" %
                  (expt.name, expt.type, expt.sample, expt.library))
            expt_dir = expt.dirname(top_dir)
            print("\tDir: %s" % expt_dir)
            # Make directory
            if os.path.exists(expt_dir):
                logging.warning("Directory %s already exists" % expt_dir)
            else:
                if not dry_run:
                    # Create directory
                    utils.mkdir(expt_dir, mode=0o775)
                else:
                    # Report what would have been done
                    print("mkdir %s" % expt_dir)
            # Locate the primary data
            for run in self.solid_runs:
                paired_end = SolidData.is_paired_end(run)
                libraries = run.fetchLibraries(expt.sample, expt.library)
                for library in libraries:
                    # Get names for links to primary data - F3
                    ln_csfasta, ln_qual = LinkNames(naming_scheme).names(
                        library)
                    print("\t\t%s" % ln_csfasta)
                    print("\t\t%s" % ln_qual)
                    # Make links to primary data
                    try:
                        self.__linkToFile(library.csfasta,
                                          os.path.join(expt_dir, ln_csfasta),
                                          relative=use_relative_links,
                                          dry_run=dry_run)
                        self.__linkToFile(library.qual,
                                          os.path.join(expt_dir, ln_qual),
                                          relative=use_relative_links,
                                          dry_run=dry_run)
                    except Exception as ex:
                        logging.error(
                            "Failed to link to some or all F3 primary data")
                        logging.error("Exception: %s" % ex)
                    # Get names for links to F5 reads (if paired-end run)
                    if paired_end:
                        ln_csfasta, ln_qual = LinkNames(naming_scheme).names(
                            library, F5=True)
                        print("\t\t%s" % ln_csfasta)
                        print("\t\t%s" % ln_qual)
                        # Make links to F5 read data
                        try:
                            self.__linkToFile(library.csfasta_f5,
                                              os.path.join(
                                                  expt_dir, ln_csfasta),
                                              relative=use_relative_links,
                                              dry_run=dry_run)
                            self.__linkToFile(library.qual_f5,
                                              os.path.join(expt_dir, ln_qual),
                                              relative=use_relative_links,
                                              dry_run=dry_run)
                        except Exception as ex:
                            logging.error(
                                "Failed to link to some or all F5 primary data"
                            )
                            logging.error("Exception: %s" % ex)
            # Make an empty ScriptCode directory
            scriptcode_dir = os.path.join(expt_dir, "ScriptCode")
            if os.path.exists(scriptcode_dir):
                logging.warning("Directory %s already exists" % scriptcode_dir)
            else:
                if not dry_run:
                    # Create directory
                    utils.mkdir(scriptcode_dir, mode=0o775)
                else:
                    # Report what would have been done
                    print("mkdir %s" % scriptcode_dir)
Example #59
def main(args):
    if args.apex:
        if sys.version_info < (3, 0):
            raise RuntimeError(
                "Apex currently only supports Python 3. Aborting.")
        if amp is None:
            raise RuntimeError(
                "Failed to import apex. Please install apex from https://www.github.com/nvidia/apex "
                "to enable mixed-precision training.")

    if args.output_dir:
        utils.mkdir(args.output_dir)

    utils.init_distributed_mode(args)
    print(args)
    print("torch version: ", torch.__version__)
    print("torchvision version: ", torchvision.__version__)

    device = torch.device(args.device)

    torch.backends.cudnn.benchmark = True

    # Data loading code
    print("Loading data")
    traindir = os.path.join(args.data_path, args.train_dir)
    valdir = os.path.join(args.data_path, args.val_dir)
    normalize = T.Normalize(mean=[0.43216, 0.394666, 0.37645],
                            std=[0.22803, 0.22145, 0.216989])

    print("Loading training data")
    st = time.time()
    cache_path = _get_cache_path(traindir)
    transform_train = torchvision.transforms.Compose([
        T.ToFloatTensorInZeroOne(),
        T.Resize((128, 171)),
        T.RandomHorizontalFlip(), normalize,
        T.RandomCrop((112, 112))
    ])

    if args.cache_dataset and os.path.exists(cache_path):
        print("Loading dataset_train from {}".format(cache_path))
        dataset, _ = torch.load(cache_path)
        dataset.transform = transform_train
    else:
        if args.distributed:
            print("It is recommended to pre-compute the dataset cache "
                  "on a single-gpu first, as it will be faster")
        dataset = torchvision.datasets.Kinetics400(
            traindir,
            frames_per_clip=args.clip_len,
            step_between_clips=1,
            transform=transform_train,
            frame_rate=15)
        if args.cache_dataset:
            print("Saving dataset_train to {}".format(cache_path))
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset, traindir), cache_path)

    print("Took", time.time() - st)

    print("Loading validation data")
    cache_path = _get_cache_path(valdir)

    transform_test = torchvision.transforms.Compose([
        T.ToFloatTensorInZeroOne(),
        T.Resize((128, 171)), normalize,
        T.CenterCrop((112, 112))
    ])

    if args.cache_dataset and os.path.exists(cache_path):
        print("Loading dataset_test from {}".format(cache_path))
        dataset_test, _ = torch.load(cache_path)
        dataset_test.transform = transform_test
    else:
        if args.distributed:
            print("It is recommended to pre-compute the dataset cache "
                  "on a single-gpu first, as it will be faster")
        dataset_test = torchvision.datasets.Kinetics400(
            valdir,
            frames_per_clip=args.clip_len,
            step_between_clips=1,
            transform=transform_test,
            frame_rate=15)
        if args.cache_dataset:
            print("Saving dataset_test to {}".format(cache_path))
            utils.mkdir(os.path.dirname(cache_path))
            utils.save_on_master((dataset_test, valdir), cache_path)

    print("Creating data loaders")
    train_sampler = RandomClipSampler(dataset.video_clips,
                                      args.clips_per_video)
    test_sampler = UniformClipSampler(dataset_test.video_clips,
                                      args.clips_per_video)
    if args.distributed:
        train_sampler = DistributedSampler(train_sampler)
        test_sampler = DistributedSampler(test_sampler)

    data_loader = torch.utils.data.DataLoader(dataset,
                                              batch_size=args.batch_size,
                                              sampler=train_sampler,
                                              num_workers=args.workers,
                                              pin_memory=True,
                                              collate_fn=collate_fn)

    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=args.batch_size,
                                                   sampler=test_sampler,
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   collate_fn=collate_fn)

    print("Creating model")
    model = torchvision.models.video.__dict__[args.model](
        pretrained=args.pretrained)
    model.to(device)
    if args.distributed and args.sync_bn:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    criterion = nn.CrossEntropyLoss()

    # scale the base learning rate linearly with the number of workers
    lr = args.lr * args.world_size
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if args.apex:
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args.apex_opt_level)

    # convert the scheduler to per-iteration stepping (rather than per-epoch)
    # so that warmup can span fractions of an epoch
    warmup_iters = args.lr_warmup_epochs * len(data_loader)
    lr_milestones = [len(data_loader) * m for m in args.lr_milestones]
    lr_scheduler = WarmupMultiStepLR(optimizer,
                                     milestones=lr_milestones,
                                     gamma=args.lr_gamma,
                                     warmup_iters=warmup_iters,
                                     warmup_factor=1e-5)

    model_without_ddp = model
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu])
        model_without_ddp = model.module

    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        args.start_epoch = checkpoint['epoch'] + 1

    if args.test_only:
        evaluate(model, criterion, data_loader_test, device=device)
        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader,
                        device, epoch, args.print_freq, args.apex)
        evaluate(model, criterion, data_loader_test, device=device)
        if args.output_dir:
            checkpoint = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict(),
                'epoch': epoch,
                'args': args
            }
            utils.save_on_master(
                checkpoint,
                os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
            utils.save_on_master(
                checkpoint, os.path.join(args.output_dir, 'checkpoint.pth'))

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
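The _get_cache_path helper is used above but not shown. A plausible sketch that hashes the dataset directory into a per-user cache file (the exact cache location is an assumption):

import hashlib
import os

def _get_cache_path(filepath):
    # Hypothetical helper: one cache file per distinct dataset directory.
    h = hashlib.sha1(filepath.encode()).hexdigest()
    cache_path = os.path.join("~", ".torch", "vision", "datasets",
                              "kinetics", h[:10] + ".pt")
    return os.path.expanduser(cache_path)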
Example #60
    def run(self,
            checkpoint_dir,
            vid_dir,
            frame_ext,
            out_dir,
            amplification_factor,
            velocity_mag=False):
        """Magnify a video in the two-frames mode.

        Args:
            checkpoint_dir: checkpoint directory.
            vid_dir: directory containing the video frames; frames are
                processed in sorted order.
            frame_ext: file extension of the input frames.
            out_dir: directory to place output frames and the resulting video.
            amplification_factor: the amplification factor,
                with 0 being no change.
            velocity_mag: if True, process the video in Dynamic mode.
        """
        vid_name = os.path.basename(out_dir)
        # make folder
        mkdir(out_dir)
        vid_frames = sorted(glob(os.path.join(vid_dir, '*.' + frame_ext)))
        first_frame = vid_frames[0]
        im = imread(first_frame)
        image_height, image_width = im.shape[:2]  # robust to color frames
        if not self.is_graph_built:
            self.setup_for_inference(checkpoint_dir, image_width, image_height)
        try:
            i = int(self.ckpt_name.split('-')[-1])
            print("Iteration number is {:d}".format(i))
            vid_name = vid_name + '_' + str(i)
        except (IndexError, ValueError):
            print("Cannot get iteration number")
        if velocity_mag:
            print("Running in Dynamic mode")

        prev_frame = first_frame
        desc = vid_name if len(vid_name) < 10 else vid_name[:10]
        for frame in tqdm(vid_frames, desc=desc):
            file_name = os.path.basename(frame)
            out_amp, diff = self.inference(prev_frame, frame,
                                           amplification_factor)

            # keep the magnified shape-encoder difference for later inspection
            self.save_out_shape_enc.append(diff)

            im_path = os.path.join(out_dir, file_name)
            save_images(out_amp, [1, 1], im_path)
            if velocity_mag:
                prev_frame = frame

        # Try to combine the output frames into a video
        call([DEFAULT_VIDEO_CONVERTER, '-y', '-f', 'image2', '-r', '30', '-i',
              os.path.join(out_dir, '%d.bmp'), '-c:v', 'libx264',
              os.path.join(out_dir, vid_name + '.mp4')])
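A hypothetical invocation, assuming magnifier is an instance of this class; the checkpoint path, frame directory, and factor are illustrative:

# Magnify subtle motion 20x, updating the reference frame each step (Dynamic mode).
magnifier.run(checkpoint_dir='checkpoints/magnet',
              vid_dir='data/frames/baby',
              frame_ext='png',
              out_dir='results/baby_20x',
              amplification_factor=20,
              velocity_mag=True)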