Example #1
0
def get_organon_settings(path_to_extension):
    """Load and return the Organon settings dict.

    Parameters
    ----------
    path_to_extension : str
        Directory containing ``organon_settings.json``.

    Returns
    -------
    dict
        The parsed settings.
    """
    json_pfad = PATH.join(path_to_extension, 'organon_settings.json')

    # 'utf-8-sig' transparently strips a UTF-8 BOM if present.  The original
    # fallback called bytes.decode() on a str, which raises AttributeError on
    # Python 3 inside the except handler, so it could never recover.
    with codecs_open(json_pfad, encoding='utf-8-sig') as data:
        settings_orga = json.loads(data.read())

    return settings_orga
Example #2
0
 def erstelle_quelltext_html(self,quelldatei):
     """Render *quelldatei* as an HTML source listing and save it.

     Lines containing the marker 'XXXXX' become named anchors; every other
     line is HTML-escaped.  The joined result is written to 'source.html'
     in ``self.dir_path`` via ``self.speicher``.
     """
     self.quelltext = [self.Praeambel(), '<div style="padding:3%">']

     quell_pfad = os.path.join(self.dir_path, quelldatei)
     with codecs_open(quell_pfad, 'r', "utf-8") as quelle_datei:
         zeilen = quelle_datei.readlines()

     for zeile in zeilen:
         if 'XXXXX' in zeile:
             # Marker line: emit an anchor named after the remaining text.
             anker = zeile.replace('XXXXX', '').replace('\n', '')
             self.quelltext.append('<a name="{0}">{0}</a>'.format(anker))
         else:
             self.quelltext.append(zeile.replace('<', '&lt;').replace('>', '&gt;'))

     self.quelltext.append('</div>')
     self.quelltext.append(self.ende())

     html = '<br>\n'.join(self.quelltext)
     ziel_pfad = os.path.join(self.dir_path, 'source.html')
     self.speicher(html, 'w', ziel_pfad)
Example #3
0
def _load_json(file_path, encoding):
    try:
        with codecs_open(file_path, encoding=encoding) as f:
            text = f.read()
        return json.loads(text)
    except ValueError:
        pass
Example #4
0
    def get_settings_of_previous_installation(self, package_folder, extension_folder):
        """Return the settings dict of a previously installed Organon, or None.

        Scans the sibling directories of *extension_folder* inside
        *package_folder* for one containing 'organon.oxt' and parses its
        ``organon_settings.json``.  Any failure yields None.
        """
        try:
            dirs = [name for name in listdir(package_folder)
                    if PATH.isdir(PATH.join(package_folder, name))]
            dirs.remove(extension_folder)

            # Remember the matching directory explicitly instead of relying on
            # the loop variable surviving past the loop.
            previous = None
            for d in dirs:
                if 'organon.oxt' in listdir(PATH.join(package_folder, d)):
                    previous = d
                    break

            if previous is None:
                return None

            json_pfad_alt = PATH.join(package_folder, previous,
                                      'organon.oxt', 'organon_settings.json')

            # 'utf-8-sig' copes with a BOM.  The original called bytes.decode()
            # on a str, which fails on Python 3 and made the broad except
            # below always return None.
            with codecs_open(json_pfad_alt, encoding='utf-8-sig') as data:
                return json.loads(data.read())

        except Exception:
            return None
Example #5
0
    def test_expand_file_prefixed_files(self):
        """@file-prefixed arguments are expanded to the file's content,
        including files written with a UTF-8 BOM; e-mail-like and
        connection-string-like values are left untouched.
        """
        f = tempfile.NamedTemporaryFile(delete=False)
        f.close()

        f_with_bom = tempfile.NamedTemporaryFile(delete=False)
        f_with_bom.close()

        with open(f.name, 'w+') as stream:
            stream.write('foo')

        from codecs import open as codecs_open
        with codecs_open(f_with_bom.name, encoding='utf-8-sig', mode='w+') as stream:
            stream.write('foo')

        cases = [
            [['bar=baz'], ['bar=baz']],
            [['bar', 'baz'], ['bar', 'baz']],
            [['bar=@{}'.format(f.name)], ['bar=foo']],
            [['bar=@{}'.format(f_with_bom.name)], ['bar=foo']],
            [['bar', '@{}'.format(f.name)], ['bar', 'foo']],
            [['bar', f.name], ['bar', f.name]],
            [['[email protected]'], ['[email protected]']],
            [['bar', '*****@*****.**'], ['bar', '*****@*****.**']],
            [['bar=mymongo=@connectionstring'], ['bar=mymongo=@connectionstring']]
        ]

        try:
            for test_case in cases:
                args = None  # pre-bind so the except clause can't hit a NameError
                try:
                    args = Application._expand_file_prefixed_files(test_case[0])  # pylint: disable=protected-access
                    self.assertEqual(args, test_case[1], 'Failed for: {}'.format(test_case[0]))
                except CLIError as ex:
                    self.fail('Unexpected error for {} ({}): {}'.format(test_case[0], args, ex))
        finally:
            # Clean up BOTH temp files (the original leaked f_with_bom).
            os.remove(f.name)
            os.remove(f_with_bom.name)
def parse(fn):
    """Parse StringTemplate file *fn* and return its source-mapping dict."""
    # Read via a context manager so the handle is closed deterministically
    # (the original left the file object open).
    with codecs_open(fn, encoding='utf-8') as handle:
        source = handle.read()
    stream = ANTLRStringStream(source)
    lexer = STLexer(stream)
    parser = STParser(CommonTokenStream(lexer))
    output = {}
    parser.source_mapping(output)
    return output
Example #7
0
File: util.py Project: bobflagg/tme
def write_utf_8_file(path, data):
    '''
    Writes the given unicode string to the file with the given path, using
    UTF-8 encoding.
    '''
    # A context manager guarantees the handle is closed even when write()
    # raises (the original leaked the handle on error).
    with codecs_open(path, "w", "utf-8") as file_obj:
        file_obj.write(data)
def main():
    """Score predictions against a CoreNLP-tagged dataset.

    Reads every TSV file in the tagged dataset directory to build a map of
    example id -> target value list, then scores each line of the prediction
    file against it, printing per-example results to stdout and summary
    counts to stderr.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--tagged-dataset-path',
                        default=os.path.join('.', 'tagged', 'data'),
                        help='Directory containing CoreNLP-tagged dataset TSV file')
    parser.add_argument('prediction_path',
                        help='Path to the prediction file. Each line contains '
                        'ex_id <tab> item1 <tab> item2 <tab> ...')
    args = parser.parse_args()

    # ID string --> list[Value]
    target_values_map = {}
    for filename in os.listdir(args.tagged_dataset_path):
        filename = os.path.join(args.tagged_dataset_path, filename)
        print('Reading dataset from', filename, file=sys.stderr)
        with codecs_open(filename, 'r', 'utf8') as fin:
            # First line is the TSV header; remaining lines are keyed by it.
            header = fin.readline().rstrip('\n').split('\t')
            for line in fin:
                stuff = dict(zip(header, line.rstrip('\n').split('\t')))
                ex_id = stuff['id']
                original_strings = tsv_unescape_list(stuff['targetValue'])
                canon_strings = tsv_unescape_list(stuff['targetCanon'])
                target_values_map[ex_id] = to_value_list(
                        original_strings, canon_strings)
    print('Read', len(target_values_map), 'examples', file=sys.stderr)

    print('Reading predictions from', args.prediction_path, file=sys.stderr)
    num_examples, num_correct = 0, 0
    with codecs_open(args.prediction_path, 'r', 'utf8') as fin:
        for line in fin:
            line = line.rstrip('\n').split('\t')
            ex_id = line[0]
            if ex_id not in target_values_map:
                # Unknown ids are reported but excluded from the accuracy count.
                print('WARNING: Example ID "%s" not found' % ex_id)
            else:
                target_values = target_values_map[ex_id]
                predicted_values = to_value_list(line[1:])
                correct = check_denotation(target_values, predicted_values)
                print(u'%s\t%s\t%s\t%s' % (ex_id, correct, target_values,
                                           predicted_values))
                num_examples += 1
                if correct:
                    num_correct += 1
    print('Examples:', num_examples, file=sys.stderr)
    print('Correct:', num_correct, file=sys.stderr)
    # The 1e-9 terms avoid division by zero on an empty prediction file.
    print('Accuracy:', round((num_correct + 1e-9) / (num_examples + 1e-9), 4), file=sys.stderr)
Example #9
0
def scrape(project_id, source_url):
    # Python 2 code: fetch a project page and cache the text of its
    # div.post paragraphs as an HTML file under HOME_DIR.
    print source_url
    r = requests.get(source_url)
    fp = codecs_open("%s/html/%s.html" % (HOME_DIR, project_id), "w", "utf-8" )
    soup = BeautifulSoup(r.text)
    text = " ".join(x.text for x in soup.find("div", {'class':"post"}).findAll("p"))

    # NOTE(review): fp leaks if the soup lookup raises (no try/finally or with).
    #content = etree.fromstring(r.text,encoding="utf-8").text_content(encoding="utf-8")
    fp.write(text)
    fp.close()
Example #10
0
 def run(self):
     """Render a source listing: read the file from 'listings/<fname>',
     emit a link node to its listing page, then append the code nodes."""
     fname = self.arguments.pop(0)
     with codecs_open(os.path.join('listings', fname), 'rb+', 'utf8') as fileobject:
         self.content = fileobject.read().splitlines()
     self.trim_content()
     target = urlunsplit(("link", 'listing', fname, '', ''))
     generated_nodes = (
         [core.publish_doctree('`{0} <{1}>`_'.format(fname, target))[0]])
     # NOTE(review): fileobject is already closed by the with-block here;
     # presumably get_code_from_file only inspects .name — confirm.
     generated_nodes += self.get_code_from_file(fileobject)
     return generated_nodes
Example #11
0
def parse_data(input_file, lang = 'en'):
    """Read a whitespace-separated word list from *input_file* (UTF-8).

    Parameters
    ----------
    input_file : str
        Path to the text file.
    lang : str
        When 'gr', the first character of the first word (e.g. a BOM
        remnant) is stripped.

    Returns
    -------
    list of str
    """
    # Context manager closes the handle (the original leaked it);
    # read().split() yields the same tokens as splitting line by line.
    with codecs_open(input_file, 'r', 'utf8') as handle:
        words = handle.read().split()
    if lang == 'gr' and words:  # guard: empty file no longer raises IndexError
        words[0] = words[0][1:]
    return words
Example #12
0
    def verbotene_buchstaben_auswechseln(self,content):
        """Replace every forbidden character in *content* and log the swaps.

        Each key of the module-level ``verbotene_Buchstaben`` dict found in
        the joined text is replaced by its mapped value (or by 'XXX <count>
        XXX' when the mapping is empty).  A report of all replacements is
        written to 'exchanged_letters.txt' in ``self.path``.  Returns the
        cleaned string, or None if anything failed (error is logged).
        """
        if self.mb.debug: log(inspect.stack)

        try:
            ausgewechselte = []
            content = ''.join(content)

            for b in verbotene_Buchstaben:
                anz = content.count(b)

                if anz > 0:


                    if verbotene_Buchstaben[b] == '':
                        tausch = 'XXX %s XXX'%anz
                    else:
                        tausch = verbotene_Buchstaben[b]
                    content = content.replace(b,tausch)

                    # (symbol, count, unicode escape, replacement) for the report
                    mitteil = b , str(anz) , b.encode("unicode_escape"),tausch
                    ausgewechselte.append(mitteil)


            pfad_a = os.path.join(self.path,'exchanged_letters.txt')

            # Column widths for the report; NOTE(review): 'b' here shadows the
            # loop variable above — confusing but harmless after the loop.
            a2 = 10
            b = 15
            c = 20

            # Write the header ('w' truncates), then append one line per swap.
            with codecs_open( pfad_a, 'w',"utf-8") as file:
                    top = 'Symbol'.ljust(a2) + u'Amount'.ljust(b) + 'Unicode Number'.ljust(c)+ 'exchanged with:' + '\r\n'
                    file.write(top)

            for aus in ausgewechselte:

                # NOTE(review): aus[2] is bytes (from .encode) — bytes.ljust
                # followed by + with str looks like it would raise TypeError on
                # Python 3; the broad except below would swallow it — confirm.
                symbol = aus[0].ljust(a2) + aus[1].ljust(b) + aus[2].ljust(c) + aus[3].ljust(c) + '\r\n'
                with codecs_open( pfad_a, 'a',"utf-8") as file:
                    file.write(symbol)

            return content
        except:
            log(inspect.stack,tb())
Example #13
0
 def speicher(self,inhalt,mode,pfad):
     """Write *inhalt* to *pfad* as UTF-8, creating parent directories on
     demand.  Failures are logged via ``log`` instead of being raised."""
     if self.mb.debug: log(inspect.stack)

     try:
         ziel_ordner = os.path.dirname(pfad)
         if not os.path.exists(ziel_ordner):
             os.makedirs(ziel_ordner)

         with codecs_open(pfad, mode, "utf-8") as datei:
             datei.write(inhalt)
     except:
         log(inspect.stack, tb())
Example #14
0
 def load(self, filename, max_age=0):
     """Load JSON state from *filename* into ``self.data``.

     When *max_age* (seconds) is positive and the file is older than that,
     or when the file is missing/unreadable, the state is reset via
     ``self.save()``.
     """
     self.filename = filename
     self.data = {}
     try:
         if max_age > 0:
             st = os.stat(self.filename)
             # time.time() is wall-clock epoch seconds and is comparable with
             # st_mtime.  The original used time.clock(), which measured CPU
             # time (wrong scale) and was removed in Python 3.8.
             if st.st_mtime + max_age < time.time():
                 self.save()
         with codecs_open(self.filename, 'r', encoding=self._encoding) as f:
             self.data = json.load(f)
     except (OSError, IOError):
         self.save()
Example #15
0
def get_file_json(file_path):
    """Read *file_path* as JSON, trying several Unicode encodings.

    Raises CLIError when the file cannot be decoded or contains invalid
    JSON.
    """
    # 'utf-8-sig' must come before 'utf-8': a Windows BOM survives a plain
    # utf-8 decode and then makes json.loads fail (see the sibling
    # get_file_json in this file, which documents the same fix).
    for encoding in ('utf-8-sig', 'utf-8', 'utf-16', 'utf-16le', 'utf-16be'):
        try:
            with codecs_open(file_path, encoding=encoding) as f:
                text = f.read()
            return json.loads(text)
        except UnicodeError:
            pass
        except Exception as ex:
            raise CLIError("File '{}' contains error: {}".format(file_path, str(ex)))

    raise CLIError('Failed to decode file {} - unknown decoding'.format(file_path))
Example #16
0
def scrape_ciid():
    dbm = DataManager()
    for pid, source_url in dbm.get_ciid_projects():
        print '\n-----\n'
        print "working on project %s" % pid
        print "url: %s" % source_url
        print "====="
        try:
            fp = codecs_open("%s/html/ciid/%s.html" % (HOME_DIR, pid), "r", "utf-8" )
            html = fp.read()
            fp.close()
            soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)
            fp = codecs_open("%s/html/processed/%s.txt" % (HOME_DIR, pid), "w", "utf-8" )
            text = " ".join(x.text for x in soup.find("div", {'class':"post"}).findAll("p"))
            fp.write(text)
            fp.close()
            print text
            sleep(2)
        except Exception, e:
            print "failed"
            print e.message()
Example #17
0
 def run(self):
     """Render a highlighted source listing for 'listings/<fname>' in the
     language given as the second directive argument, plus a link node."""
     fname = self.arguments.pop(0)
     lang = self.arguments.pop(0)
     fpath = os.path.join('listings', fname)
     self.arguments.insert(0, fpath)
     self.options['code'] = lang
     with codecs_open(fpath, 'rb+', 'utf8') as fileobject:
         self.content = fileobject.read().splitlines()
     # Let the build system re-run when the listed file changes.
     self.state.document.settings.record_dependencies.add(fpath)
     target = urlunsplit(("link", 'listing', fname, '', ''))
     generated_nodes = (
         [core.publish_doctree('`{0} <{1}>`_'.format(fname, target))[0]])
     # NOTE(review): fileobject is closed by the with-block at this point;
     # presumably get_code_from_file only uses its metadata — confirm.
     generated_nodes += self.get_code_from_file(fileobject)
     return generated_nodes
Example #18
0
def collect_source_data():
    # Python 2 code: download each project's source page and cache the raw
    # HTML under HOME_DIR/html/, pausing between requests.
    dbm = DataManager()
    for pid, source_url in dbm.get_project_source():
        print "working on project %s" % pid
        print "url: %s" % source_url
        try:
            r = requests.get(source_url)
            # NOTE(review): fp leaks if write() raises (no with/finally).
            fp = codecs_open("%s/html/%s.html" % (HOME_DIR, pid), "w", "utf-8" )
            fp.write(r.text)
            fp.close()
            sleep(1)
        except ConnectionError, e:
            print "failed"
            print e.message
Example #19
0
File: main.py Project: akleiw/KSP
def _stdstream(path = None):
	"""Redirect stdout/stderr to '<path>/server.log' (append, latin1 with
	backslash-replace) when *path* is given; always backfill the dunder
	streams and return the active stdout.
	"""
	from codecs import open as codecs_open

	if path:
		server_log = os.path.join(path, 'server.log')
		# NOTE(review): the previous sys.stdout object is not closed here.
		sys.stdout = codecs_open(server_log, mode = 'a', encoding = 'latin1', errors = 'backslashreplace')
		sys.stderr = sys.stdout # open(os.path.join(config.logs_path, 'stderr.log'), 'w', 1)

	# Some embedded interpreters start with the dunder streams unset.
	if sys.__stdout__ is None:
		sys.__stdout__ = sys.stdout
	if sys.__stderr__ is None:
		sys.__stderr__ = sys.stderr

	return sys.stdout
Example #20
0
    def persist_cached_creds(self):
        """Write ADAL tokens plus service-principal creds to the token file.

        Fields listed in TOKEN_FIELDS_EXCLUDED_FROM_PERSISTENCE are stripped
        from each entry first, and the cache's dirty flag is cleared.
        """
        # be compatible with azure-xplat-cli, use 'ascii' so to save w/o a BOM
        with codecs_open(self._token_file, 'w', encoding='ascii') as cred_file:
            items = self.adal_token_cache.read_items()
            all_creds = [entry for _, entry in items]

            # trim away useless fields (needed for cred sharing with xplat)
            for i in all_creds:
                for key in TOKEN_FIELDS_EXCLUDED_FROM_PERSISTENCE:
                    i.pop(key, None)

            all_creds.extend(self._service_principal_creds)
            cred_file.write(json.dumps(all_creds))
        self.adal_token_cache.has_state_changed = False
Example #21
0
def get_file_json(file_path, throw_on_empty=True):
    """Decode *file_path* with a series of Unicode encodings and parse JSON.

    An empty file yields None when *throw_on_empty* is False; decode
    failures move on to the next encoding; any other failure raises
    CLIError, as does exhausting every encoding.
    """
    # 'utf-8-sig' first so a Windows BOM cannot leak into the JSON text.
    for candidate in ('utf-8-sig', 'utf-8', 'utf-16', 'utf-16le', 'utf-16be'):
        try:
            with codecs_open(file_path, encoding=candidate) as handle:
                raw = handle.read()
            if not raw and not throw_on_empty:
                return None
            return json.loads(raw)
        except UnicodeError:
            continue
        except Exception as ex:
            raise CLIError("File '{}' contains error: {}".format(file_path, str(ex)))

    raise CLIError('Failed to decode file {} - unknown decoding'.format(file_path))
Example #22
0
def read_file_content(file_path, allow_binary=False):
    """Return the text content of *file_path*, trying common Unicode encodings.

    On a hard decode failure the raw bytes are returned when *allow_binary*
    is true, otherwise the UnicodeDecodeError propagates.  Raises CLIError
    when no encoding matched.
    """
    from codecs import open as codecs_open
    # BOM-aware 'utf-8-sig' goes first so Windows files decode cleanly.
    for candidate in ('utf-8-sig', 'utf-8', 'utf-16', 'utf-16le', 'utf-16be'):
        try:
            with codecs_open(file_path, encoding=candidate) as handle:
                return handle.read()
        except UnicodeDecodeError:
            if not allow_binary:
                raise
            with open(file_path, 'rb') as raw_handle:
                return raw_handle.read()
        except UnicodeError:
            pass

    raise CLIError('Failed to decode file {} - unknown decoding'.format(file_path))
Example #23
0
 def speicher(self,inhalt,mode,pfad = None):
     """Join *inhalt* and write it to *pfad* as UTF-8.

     Defaults to '<self.path>/<self.dateiname>.tex' when *pfad* is None;
     parent directories are created on demand.  Failures are logged via
     ``log`` instead of being raised.
     """
     if self.mb.debug: log(inspect.stack)

     try:
         if pfad is None:
             pfad = os.path.join(self.path, self.dateiname + '.tex')

         ziel_ordner = os.path.dirname(pfad)
         if not os.path.exists(ziel_ordner):
             os.makedirs(ziel_ordner)

         with codecs_open(pfad, mode, "utf-8") as datei:
             datei.write(''.join(inhalt))
     except:
         log(inspect.stack, tb())
Example #24
0
 def process_file(self, filename):
     """Run every configured checker over *filename* and collect messages.

     The file is re-read once per checker.  A '# -*- coding:' line in the
     first or second line is stripped/blanked because it would break
     ast.parse/compile.
     """
     messages = []
     for checker in self.checkers:
         with codecs_open(filename, encoding='utf-8') as handle:
             lines = handle.readlines()
             count = len(lines)
             if count and '# -*- coding:' in lines[0]:
                 lines = lines[1:]
             elif count > 1 and '# -*- coding:' in lines[1]:
                 lines[1] = ''
             try:
                 messages.extend(checker.check(filename, lines))
             except UnicodeDecodeError:
                 # Dump the offending lines before re-raising for debugging.
                 from pprint import pprint
                 pprint(lines)
                 raise
     return messages
Example #25
0
def read_file_content(file_path, allow_binary=False):
    from codecs import open as codecs_open
    # Note, always put 'utf-8-sig' first, so that BOM in WinOS won't cause trouble.
    for encoding in ['utf-8-sig', 'utf-8', 'utf-16', 'utf-16le', 'utf-16be']:
        try:
            with codecs_open(file_path, encoding=encoding) as f:
                logger.debug("attempting to read file %s as %s", file_path, encoding)
                return f.read()
        except (UnicodeError, UnicodeDecodeError):
            pass

    if allow_binary:
        try:
            with open(file_path, 'rb') as input_file:
                logger.debug("attempting to read file %s as binary", file_path)
                return base64.b64encode(input_file.read()).decode("utf-8")
        except Exception:  # pylint: disable=broad-except
            pass
    raise CLIError('Failed to decode file {} - unknown decoding'.format(file_path))
Example #26
0
    def open(self, f, mode='r', cols=None, **kwargs):
        """Arguments:

            f: An open file object to read or write to/from, or a path
               string (opened here via codecs_open with *mode*)
            mode: 'r' or 'w' weither to read or write
            cols: a list of column names (as strings) to write, or a
               comma-separated string of them; defaults to self.cols

        Returns a _CsvReader for mode 'r', otherwise a _CsvWriter.
        """

        if isinstance(cols, six.string_types):
            cols = cols.split(",")
        if isinstance(f, six.string_types):
            # Path given instead of a file object: open it ourselves.
            self._f = codecs_open(f, mode=mode)
        else:
            self._f = f
        if cols is None:
            cols = self.cols
        if mode == 'r':
            return self._CsvReader(self._f, **kwargs)
        else:
            return self._CsvWriter(self._f, cols, **kwargs)
Example #27
0
    def oeffne_text(self,pfad):
        """Open the document at *pfad* and return (text, basename).

        .txt files are read as UTF-8 and yield a list of lines; anything
        else is loaded hidden through the LibreOffice desktop and yields
        the document's Text object.
        """
        if self.mb.debug: log(inspect.stack)

        extension = os.path.splitext(pfad)[1]
        name = os.path.basename(pfad)

        if extension == '.txt':

            with codecs_open( pfad, "r",'utf-8') as file:
                text = file.readlines()

        else:
            # Load invisibly so no window flashes up.
            prop = uno.createUnoStruct("com.sun.star.beans.PropertyValue")
            prop.Name = 'Hidden'
            prop.Value = True

            doc = self.mb.desktop.loadComponentFromURL(uno.systemPathToFileUrl(pfad),'_blank',8+32,(prop,))

            text = doc.Text#.String.splitlines()
            doc.close(False)

        return text,name
Example #28
0
    def load(self, filename, max_age=0):
        """Load JSON state from *filename* into ``self.data``.

        When *max_age* (seconds) is positive and the file is older than
        that, the state is reset via ``self.save()`` first.  Missing files
        reset silently (INFO); parse errors reset with a WARNING.
        """
        self.filename = filename
        self.data = {}
        try:
            if max_age > 0:
                st = os.stat(self.filename)
                # time.time() is wall-clock epoch seconds, comparable with
                # st_mtime.  The original used time.clock(), which measured
                # CPU time (wrong scale) and was removed in Python 3.8.
                if st.st_mtime + max_age < time.time():
                    self.save()
            with codecs_open(self.filename, 'r', encoding=self._encoding) as f:
                self.data = json.load(f)
        except (OSError, IOError, t_JSONDecodeError) as load_exception:
            # OSError / IOError should imply file not found issues which are expected on fresh runs (e.g. on build
            # agents or new systems). A parse error indicates invalid/bad data in the file. We do not wish to warn
            # on missing files since we expect that, but do if the data isn't parsing as expected.
            log_level = logging.INFO
            if isinstance(load_exception, t_JSONDecodeError):
                log_level = logging.WARNING

            get_logger(__name__).log(log_level,
                                     "Failed to load or parse file %s. It will be overridden by default settings.",
                                     self.filename)
            self.save()
Example #29
0
    def log(self,args,traceb = None,extras = None):
        """Write a formatted debug line (and optional traceback/extras).

        *args* is either a pre-collected stack list (error mode: dump it to
        error_log.txt and return) or a callable like inspect.stack whose
        frames are used to derive module, class, function and caller for a
        column-formatted log line.  Output goes to stdout, optionally to
        organon_log.txt when self.write_debug_file is set, and tracebacks
        always also go to error_log.txt.  Never raises: any internal error
        is appended to organon_log_error.txt instead.
        """

        try:

            if isinstance(args, list):

                # Error mode: *args* is already a stack list, not a callable.
                info = "*** ERROR: *** \nCan't catch exact description. \nStack:"
                text = [ '{0} {1} {2}'.format(s[1], s[2], s[3]) for s in args ]

                msg = '\n'.join([info] + text)
                path2 = join(self.location_debug_file,'error_log.txt')
                with codecs_open( path2, "a","utf-8") as f:
                    f.write('### ERROR ### \r\n')
                    f.write(msg + '\r\n')
                print(msg)

                return


            # Normal mode: call the stack supplier (e.g. inspect.stack).
            info = args()

            try:
                caller = info[2][3]
                caller_class = info[2][0].f_locals['self'].__class__.__name__
            except:
                # No caller frame with a 'self' — e.g. UNO event callbacks.
                caller = 'EventObject'
                caller_class = ''


            call = caller_class + '.' + caller + ' )'

            if self.log_args:
                try:
                    argues = self.format_argues(info[1][0].f_locals)
                except:
                    argues = ''

            function = info[1][3]
            try:
                xclass = info[1][0].f_locals['self'].__class__.__name__
            except:
                # When called from a function without a class, no 'self' exists
                xclass = str(info[1][0])

            try:
                modul = basename(info[1][1]).split('.')[0]
            except:
                modul = ''

#             if xclass in ('ViewCursor_Selection_Listener'):
#                 return
#
#             if function in ('mouseEntered','mouseExited','entferne_Trenner'):
#                 return

            # Suppress the extremely chatty mouse hover events.
            if function in ('mouseEntered','mouseExited'):
                return



            if self.log_args:
                string = '{0: <8.8}  {1: <12.12}  {2: <24.24}  {3: <34.34}  ( caller: {4: <44.44}  args:{5}'.format(self.debug_time(),modul,xclass,function,call,argues)
            else:
                string = '{0: <8.8}  {1: <12.12}  {2: <24.24}  {3: <34.34}  ( caller: {4: <44.44}'.format(self.debug_time(),modul,xclass,function,call)

            try:
                print(string)
            except:
                pass

            if self.write_debug_file:
                path = join(self.location_debug_file,'organon_log.txt')
                with codecs_open( path, "a","utf-8") as f:
                    f.write(string+'\n')

                if traceb != None:
                    print(traceb)

                    with codecs_open( path, "a","utf-8") as f:
                        f.write('### ERROR ### \r\n')
                        try:
                            f.write(traceb+'\r\n')
                        except:
                            print('ERROR ON WRITING ERROR TO FILE')
                            f.write(str(traceb)+'\r\n')

                if extras != None:
                    print(extras)
                    with codecs_open( path, "a","utf-8") as f:
                        f.write(extras+'\r\n')

#             self.suche()
#             # HELPER
#             nachricht = self.suche()
#             if nachricht != None:
#                 with open(path , "a") as f:
#                     f.write(nachricht+'\r\n')

#             self.helfer()

            # Errors are always logged, regardless of write_debug_file
            if traceb != None:

                path2 = join(self.location_debug_file,'error_log.txt')
                with codecs_open( path2, "a","utf-8") as f:
                    f.write('### ERROR ###1 \r\n')
                    f.write(traceb+'\r\n')

                try:
                    if not self.write_debug_file:
                        print(traceb)
                except:
                    pass

        except Exception as e:
            # Logging must never crash the caller: record the failure itself.
            try:
                print(str(e))
                print(tb())
                path = join(self.location_debug_file,'organon_log_error.txt')
                with codecs_open( path, "a","utf-8") as f:
                    f.write(str(e) +'\r\n')
                    f.write(str(tb()) +'\r\n')
            except:
                print(tb())
                with codecs_open( path, "a","utf-8") as f:
                    f.write(str(tb()) +'\r\n')
Example #30
0
"""Packaging settings."""
from codecs import open as codecs_open
from os.path import abspath, dirname, join

from setuptools import find_packages, setup

THIS_DIR = abspath(dirname(__file__))


def local_scheme(version):  # pylint: disable=unused-argument
    # type: (str) -> str
    """Return an empty local version so uploads to Test PyPI work (drops +xyz)."""
    return ""


# The README supplies the long description rendered on PyPI.
with codecs_open(join(THIS_DIR, 'README.md'), encoding='utf-8') as readfile:
    LONG_DESCRIPTION = readfile.read()


INSTALL_REQUIRES = [
    'Send2Trash',
    'awacs',  # for embedded hooks
    # awscli included for embedded hooks and aws subcommand
    'awscli>=1.16.308<2.0',
    'backports.tempfile; python_version < "3.2"',
    'botocore>=1.12.111',  # matching awscli/boto3 requirement
    'boto3>=1.9.111<2.0',
    'cfn_flip>=1.2.1',  # 1.2.1+ require PyYAML 4.1+
    'cfn-lint',
    'docker',
    'docopt',
Example #31
0
import os
from codecs import open as codecs_open

from setuptools import find_packages
from setuptools import setup

base_dir = os.path.abspath(os.path.dirname(__file__))

# Read the README once so setup() below can use it as the long description.
with codecs_open(base_dir + '/README.md', encoding='utf-8') as f:
    long_description = f.read()

# Distribution metadata for the drf-autoview package; long_description is
# the README.md read above.
setup(
    name='drf-autoview',
    author='Deepak Sheoran',
    version='1.0.0',
    description='Django drf api generator',
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers=[
        "Programming Language :: Python", "Programming Language :: Python :: 3"
    ],
    url="https://github.com/sheoran/django-drf-autoview.git",
    packages=find_packages(exclude=['tests']),
    install_requires=['djangorestframework'],
    zip_safe=False,
)
def st_graph(html):
    """Embed the HTML graph file at path *html* in the Streamlit page."""
    # Context manager closes the handle (the original leaked it).
    with codecs_open(html, 'r') as graph_file:
        page = graph_file.read()
    stc.html(page, width=1000, height=600)
Example #33
0
"""Packaging settings."""
from codecs import open as codecs_open
from os.path import abspath, dirname, join

from setuptools import find_packages, setup

THIS_DIR = abspath(dirname(__file__))


def local_scheme(version):  # pylint: disable=unused-argument
    # type: (str) -> str
    """Always use an empty local version part so Test PyPI accepts the upload."""
    return ""


# The README supplies the long description rendered on PyPI.
with codecs_open(join(THIS_DIR, "README.md"), encoding="utf-8") as readfile:
    LONG_DESCRIPTION = readfile.read()

INSTALL_REQUIRES = [
    "Send2Trash",
    "awacs",  # for embedded hooks
    # awscli included for embedded hooks and aws subcommand
    "awscli>=1.16.308<2.0",
    'backports.tempfile; python_version < "3.2"',
    "botocore>=1.12.111",  # matching awscli/boto3 requirement
    "boto3>=1.9.111<2.0",
    "cfn_flip>=1.2.1",  # 1.2.1+ require PyYAML 4.1+
    "cfn-lint",
    "click>=7.1",
    "coloredlogs",
    "docker",
Example #34
0
def uopen(filename):
    """Open *filename* for reading as UTF-8 text and return the file object."""
    return codecs_open(filename, encoding='utf8')
Example #35
0
"""Build the snuggs package."""

from codecs import open as codecs_open
from setuptools import setup, find_packages


# Get the long description from the relevant file
with codecs_open("README.rst", encoding="utf-8") as f:
    long_description = f.read()

# Extract __version__ from the package source without importing it (keeps
# setup.py runnable before dependencies are installed).
with open("snuggs/__init__.py") as f:
    for line in f:
        if line.startswith("__version__"):
            version = line.split("=")[1]
            version = version.strip().strip('"')
            break

setup(
    name="snuggs",
    version=version,
    description=u"Snuggs are s-expressions for Numpy",
    long_description=long_description,
    classifiers=[],
    keywords="",
    author=u"Sean Gillies",
    author_email="*****@*****.**",
    url="https://github.com/mapbox/snuggs",
    license="MIT",
    packages=find_packages(exclude=["ez_setup", "examples", "tests"]),
    include_package_data=True,
    zip_safe=False,
Example #36
0
def _patch_specfile(obfdist,
                    src,
                    specfile,
                    hookpath=None,
                    encoding=None,
                    modname='pytransform'):
    """Rewrite a PyInstaller .spec file so it picks up obfuscated scripts.

    Inserts a patch before the 'pyz = PYZ(' line that remaps entries of
    a.scripts / a.pure from *src* to *obfdist*, and (when *hookpath* is
    given) prepends hook path, hidden import *modname* and hook search path
    to the Analysis() call.  Returns the path of the '<name>-patched.spec'
    file written next to the original.
    """
    if encoding is None:
        with open(specfile, 'r') as f:
            lines = f.readlines()
    else:
        with codecs_open(specfile, 'r', encoding) as f:
            lines = f.readlines()

    p = os.path.abspath(obfdist)
    patched_lines = (
        "", "# Patched by PyArmor", "_src = %s" % repr(os.path.abspath(src)),
        "for i in range(len(a.scripts)):",
        "    if a.scripts[i][1].startswith(_src):",
        "        x = a.scripts[i][1].replace(_src, r'%s')" % p,
        "        if os.path.exists(x):",
        "            a.scripts[i] = a.scripts[i][0], x, a.scripts[i][2]",
        "for i in range(len(a.pure)):",
        "    if a.pure[i][1].startswith(_src):",
        "        x = a.pure[i][1].replace(_src, r'%s')" % p,
        "        if os.path.exists(x):",
        "            if hasattr(a.pure, '_code_cache'):",
        "                with open(x) as f:",
        "                    a.pure._code_cache[a.pure[i][0]] = compile(f.read(), a.pure[i][1], 'exec')",
        "            a.pure[i] = a.pure[i][0], x, a.pure[i][2]",
        "# Patch end.", "", "")

    if encoding is not None and sys.version_info[0] == 2:
        patched_lines = [x.decode(encoding) for x in patched_lines]

    for i in range(len(lines)):
        if lines[i].startswith("pyz = PYZ("):
            # NOTE(review): assigning a joined *string* to the list slice
            # inserts it character by character; writelines() later joins the
            # characters back, so the written output is still correct.
            lines[i:i] = '\n'.join(patched_lines)
            break
    else:
        raise RuntimeError('Unsupport .spec file, no "pyz = PYZ" found')

    if hookpath is not None:
        for k in range(len(lines)):
            if lines[k].startswith('a = Analysis('):
                break
        else:
            raise RuntimeError('Unsupport .spec file, no "a = Analysis" found')
        # Only scan the lines between Analysis( and the PYZ insertion point.
        n = i
        keys = []
        for i in range(k, n):
            if lines[i].lstrip().startswith('pathex='):
                lines[i] = lines[i].replace('pathex=',
                                            'pathex=[r"%s"]+' % hookpath, 1)
                keys.append('pathex')
            elif lines[i].lstrip().startswith('hiddenimports='):
                lines[i] = lines[i].replace('hiddenimports=',
                                            'hiddenimports=["%s"]+' % modname,
                                            1)
                keys.append('hiddenimports')
            elif lines[i].lstrip().startswith('hookspath='):
                lines[i] = lines[i].replace('hookspath=',
                                            'hookspath=[r"%s"]+' % hookpath, 1)
                keys.append('hookspath')
        d = set(['pathex', 'hiddenimports', 'hookspath']) - set(keys)
        if d:
            raise RuntimeError('Unsupport .spec file, no %s found' % list(d))

    patched_file = specfile[:-5] + '-patched.spec'
    if encoding is None:
        with open(patched_file, 'w') as f:
            f.writelines(lines)
    else:
        with codecs_open(patched_file, 'w', encoding) as f:
            f.writelines(lines)

    return os.path.normpath(patched_file)
Example #37
0
def open_utf8(fileName, mode):
    """Open *fileName* in the given *mode*, always encoding/decoding as UTF-8.

    Thin wrapper around ``codecs.open`` so callers never have to repeat the
    encoding argument; returns the open file object.
    """
    return codecs_open(fileName, mode, encoding='utf-8')
Example #38
0
from setuptools import setup
from codecs import open as codecs_open
from os import path

here = path.abspath(path.dirname(__file__))

with codecs_open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='sshpubkeys',
    version='3.1.0',
    description='SSH public key parser',
    long_description=long_description,
    url='https://github.com/ojarva/python-sshpubkeys',
    author='Olli Jarva',
    author_email='*****@*****.**',
    license='BSD',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Topic :: Security',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
Example #39
0
from codecs import open as codecs_open
from setuptools import setup, find_packages


# Get the long description from the relevant file
with codecs_open('INFO.rst', encoding='utf-8') as f:
    long_description = f.read()


setup(name='paicemana',
      version='0.0.1',
      description="Helper script for works at OSMBrasil/semanario",
      long_description=long_description,
      classifiers=[],
      keywords=['openstreetmap', 'osm', 'translations', 'weeklyosm'],
      author="Alexandre Magno",
      author_email='*****@*****.**',
      url='https://github.com/OSMBrasil/paicemana',
      license='GPLv3+',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'click',
          'html2text',
          'lxml'
      ],
      entry_points="""
      [console_scripts]
      paicemana=paicemana.scripts.cli:cli
      """
Example #40
0
def _read_file_content(file_path):
    file_text = None
    if os.path.isfile(file_path):
        with codecs_open(file_path, 'r', encoding='ascii') as file_to_read:
            file_text = file_to_read.read()
    return file_text
Example #41
0
"""
Sample poll app.
"""
from codecs import open as codecs_open
from os import path

from setuptools import setup, find_packages


HERE = path.abspath(path.dirname(__file__))

with codecs_open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

setup(
    name='mkpoll',
    version='0.1.0',
    description='Sample Poll REST app',
    long_description=LONG_DESCRIPTION,
    url='https://github.com/bogdan-cornianu/mkpoll',
    author='PBS Education',
    author_email='*****@*****.**',
    license='GPLv3',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django :: 1.11',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: OS Independent',
Example #42
0
"""setup.py"""
from codecs import open as codecs_open
from setuptools import setup

with codecs_open("README.md", "r", "utf-8") as f:
    README = f.read()

setup(
    author="Beau Barker",
    author_email="*****@*****.**",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    description="Process JSON-RPC requests",
    extras_require={
        "tox": ["tox"],
        "examples": [
            "aiohttp",
            "aiozmq",
            "flask",
            "flask-socketio",
            "gmqtt",
            "pyzmq",
            "tornado",
            "websockets",
            "werkzeug",
        ],
    },
    include_package_data=True,
Example #43
0
 def save(self):
     """Serialize ``self.data`` to ``self.filename`` as JSON.

     Does nothing when ``self.filename`` is unset/empty; the file is written
     using the instance's configured ``self._encoding``.
     """
     if not self.filename:
         return
     with codecs_open(self.filename, 'w', encoding=self._encoding) as handle:
         json.dump(self.data, handle)
Example #44
0
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.test import test as TestCommand
from codecs import open as codecs_open
from os import path, environ
from platform import system, architecture

if not sys.version_info[:2] >= (2, 6):
    print('ERROR: Python 2.6 or newer is required')
    sys.exit(1)
if system() == 'Windows' and architecture()[0] == '32bit':
    print('ERROR: Mqlight requires 64bit Python on Windows.')
    sys.exit(1)

HERE = path.abspath(path.dirname(__file__))
with codecs_open(path.join(HERE, 'description.rst'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

if system() == 'Darwin':
    environ['ARCHFLAGS'] = '-arch x86_64 -mmacosx-version-min=10.8'


def get_sources():
    """Return a list of source files to compile into the extension.

    Windows builds use the C++ translation unit; every other platform
    compiles the plain C one.
    """
    ext_source = 'cproton.cxx' if system() == 'Windows' else 'cproton.c'
    return [path.join('mqlight', ext_source)]


def get_runtime_library_dirs():
Example #45
0
# Packaging script for the ``data`` Riak API wrapper.
from codecs import open as codecs_open
from setuptools import setup, find_packages

# Get the long description from the relevant file
with codecs_open('README.rst', encoding='utf-8') as f:
    long_description = f.read()

setup(name='data',
      version='0.0.1',
      description=u"riak-api",
      # NOTE(review): the README text read into ``long_description`` above is
      # never used; this hard-coded string was probably meant to be that
      # variable — confirm with the author before changing metadata.
      long_description='Riak KV api wrapper',
      classifiers=[],
      keywords='',
      author=u"Jaime Viloria",
      author_email='*****@*****.**',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=True,
      zip_safe=False,
      install_requires=['flask', 'riak', 'click', 'requests', 'ldap3'],
      extras_require={
          'test': ['pytest'],
      },
      # console entry points: HTTP front-end and CLI for the Riak wrapper
      entry_points="""
      [console_scripts]
      riak_http=app.lib.http_controller:run
      riak_cli=app.cli.v1:cli
      """)
Example #46
0
#!/usr/bin/env python

from setuptools import setup, find_packages
from codecs import open as codecs_open

try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
    long_description = codecs_open('README.rst', encoding="utf8").read()

with open('requirements.txt') as f:
    requirements = f.read().splitlines()

version = '1.0.5'

setup(
    name='py-frappe-client',
    version=version,
    install_requires=requirements,
    author='Karan Sharma',
    author_email='*****@*****.**',
    packages=find_packages(),
    include_package_data=True,
    url='https://github.com/zerodhatech/py-frappe-client/',
    license='MIT',
    description='Python wrapper for Frappe API',
    long_description=long_description,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
Example #47
0
def read(*parts):
    """Read and return the UTF-8 text of a file located relative to this module.

    :param parts: path components joined onto this module's directory.
    :return: the file's full contents as text.

    Fix: the original called ``codecs_open(...).read()`` without ever closing
    the handle, leaking the file descriptor until garbage collection; the
    with-statement guarantees it is closed.
    """
    file_path = os.path.join(os.path.dirname(__file__), *parts)
    with codecs_open(file_path, encoding='utf-8') as handle:
        return handle.read()
Example #48
0
                        action='store')
    parser.add_argument('directories',
                        metavar='dir',
                        help='Directories to search',
                        action='store',
                        nargs='+')
    parser.add_argument('-n',
                        '--count',
                        dest='count',
                        type=int,
                        help='Maximum number of tracks in generated playlist.',
                        default=0)
    parser.add_argument('-m',
                        '--method',
                        dest='method',
                        help='Playlist generation method',
                        choices=Playlist.methods.keys(),
                        default=Playlist.methods.keys()[0])
    args = parser.parse_args(argv[1:])

    tracks = list(Playlist.methods[args.method].generate(
        args.count, *args.directories))
    print >> stderr, 'Writing playlist file.'
    if args.playlist == '-':
        for t in tracks:
            print >> stdout, t
    else:
        with closing(codecs_open(args.playlist, 'w', 'utf8')) as f:
            for t in tracks:
                print >> f, t
def convert(stats, epg, epgfile):
    """Build an XMLTV document from channel stats and EPG data, write to *epgfile*.

    :param stats: iterable of ``(channel_id, display_name)`` pairs; the
        pseudo-channel ``"webik"`` is skipped everywhere.
    :param epg: list of per-day dicts mapping channel_id to a list of
        programme dicts with ``title``, ``description``, ``since``, ``till``
        and optionally ``edition``/``asset`` keys.
    :param epgfile: path of the UTF-8 XMLTV file to write.

    Fix: the channel-creation loop originally tested ``stats[0] == "webik"``
    (the first entry of the whole list) instead of ``stat[0]`` (the current
    channel id, as the programme loop below correctly does), so the "webik"
    pseudo-channel was never skipped when emitting <channel> nodes.
    """
    impl = miniDom.getDOMImplementation()
    dcmnt = impl.createDocument(None, u'tv', None)
    root = dcmnt.documentElement
    # create channels
    for stat in stats:
        if stat[0] == "webik":
            continue
        channNode = dcmnt.createElement(u'channel')
        displNode = dcmnt.createElement(u'display-name')
        displText = dcmnt.createTextNode(stat[1])
        displNode.appendChild(displText)
        displNode.setAttribute('lang', 'cs')
        channNode.appendChild(displNode)
        channNode.setAttribute('id', stat[0])
        root.appendChild(channNode)
    # create programmes
    for stat in stats:
        if stat[0] == "webik":
            continue
        # load data of one channel into a temporary list
        tmpday = []
        for day in epg:
            if stat[0] in day:
                for item in day[stat[0]]:
                    tmpprog = {}
                    tmpprog['title'] = item['title']
                    tmpprog['description'] = item['description']
                    if 'edition' in item and 'asset' in item['edition']:
                        tmpprog['icon'] = item['edition']['asset']
                    tmpprog['start'] = item['since']
                    tmpprog['stop'] = item['till']
                    tmpday.append(tmpprog)
        # check and repair time continuity: drop programmes that overlap or
        # are fully contained in a neighbour, keeping the longer one
        tmpday2 = []
        flag = False  # True -> the next entry was already consumed by a merge
        for i in range(len(tmpday)):
            if flag:
                flag = False
                continue
            if i < len(tmpday) - 2:
                if tmpday[i]['start'] <= tmpday[i + 1]['start']:
                    if tmpday[i]['stop'] > tmpday[i + 1]['stop']:
                        # next programme lies fully inside the current one
                        flag = True
                        tmpday2.append(tmpday[i])
                        continue
                    if tmpday[i]['stop'] > tmpday[i + 1]['start']:
                        # next programme starts before the current one ends
                        flag = True
                        tmpday2.append(tmpday[i])
                        continue
                elif tmpday[i]['start'] >= tmpday[i + 1]['start']:
                    if tmpday[i]['stop'] < tmpday[i + 1]['stop']:
                        # current programme lies fully inside the next one
                        flag = True
                        tmpday2.append(tmpday[i + 1])
                        continue
                    if tmpday[i]['start'] < tmpday[i + 1]['stop']:
                        flag = True
                        tmpday2.append(tmpday[i + 1])
                        continue
                # drop entries contained in the last accepted programme
                if tmpday2 and tmpday[i]['start'] >= tmpday2[-1][
                        'start'] and tmpday[i]['stop'] <= tmpday2[-1]['stop']:
                    continue
            tmpday2.append(tmpday[i])
        for item in tmpday2:
            prgNode = dcmnt.createElement(u'programme')
            titleNode = dcmnt.createElement(u'title')
            titleText = dcmnt.createTextNode(item['title'])
            titleNode.appendChild(titleText)
            titleNode.setAttribute('lang', 'cs')
            prgNode.appendChild(titleNode)
            descNode = dcmnt.createElement(u'desc')
            descText = dcmnt.createTextNode(item['description'])
            descNode.appendChild(descText)
            descNode.setAttribute('lang', 'cs')
            prgNode.appendChild(descNode)
            if 'icon' in item:
                iconNode = dcmnt.createElement(u'icon')
                iconNode.setAttribute('src', item['icon'])
                prgNode.appendChild(iconNode)
            prgNode.setAttribute('start', convertTime(item['start']))
            prgNode.setAttribute('stop', convertTime(item['stop']))
            prgNode.setAttribute('channel', stat[0])
            root.appendChild(prgNode)
    # the with-block closes the file; the original's explicit out.close()
    # inside it was redundant and has been removed
    with codecs_open(epgfile, "w", "utf-8") as out:
        dcmnt.writexml(out, addindent='    ', newl='\n', encoding="utf-8")
    def generate_output(self, writer):
        """
        Generates the sitemap file and the stylesheet file and puts them into the content dir.

        Copies ``sitemap-stylesheet.xsl`` (shipped next to this module) into
        ``self.path_output`` with the ``{{ SITENAME }}`` marker substituted,
        then builds ``sitemap.xml`` from index, articles, pages, categories,
        tags and authors.

        :param writer: the writer instance (unused here)
        :type writer: pelican.writers.Writer
        """
        # write xml stylesheet
        with codecs_open(os.path.join(os.path.dirname(__file__),
                                      'sitemap-stylesheet.xsl'),
                         'r',
                         encoding='utf-8') as fd_origin:
            with codecs_open(os.path.join(self.path_output,
                                          'sitemap-stylesheet.xsl'),
                             'w',
                             encoding='utf-8') as fd_destination:
                xsl = fd_origin.read()
                # replace some template markers
                # TODO use pelican template magic
                # NOTE(review): context.get('SITENAME') may be None, which
                # would make str.replace raise TypeError — confirm SITENAME
                # is always present in the pelican context.
                xsl = xsl.replace('{{ SITENAME }}',
                                  self.context.get('SITENAME'))
                fd_destination.write(xsl)

        # will contain the url nodes as text
        urls = ''

        # get all articles sorted by time (newest first)
        articles_sorted = sorted(self.context['articles'],
                                 key=self.__get_date_key,
                                 reverse=True)

        # get all pages sorted by time (newest first)
        pages_sorted = sorted(self.context.get('pages'),
                              key=self.__get_date_key,
                              reverse=True)

        # the landing page
        if 'index' in self.context.get('DIRECT_TEMPLATES'):
            # assume that the index page has changed with the most current article or page
            # use the first article or page if no articles
            index_reference = None
            if len(articles_sorted) > 0:
                index_reference = articles_sorted[0]
            elif len(pages_sorted) > 0:
                index_reference = pages_sorted[0]

            if index_reference is not None:
                urls += self.__create_url_node_for_content(
                    index_reference,
                    'index',
                    url=self.url_site,
                )

        # process articles
        for article in articles_sorted:
            urls += self.__create_url_node_for_content(article, 'articles')

        # process pages (page URLs are relative, so join with the site root)
        for page in pages_sorted:
            urls += self.__create_url_node_for_content(page,
                                                       'pages',
                                                       url=urljoin(
                                                           self.url_site,
                                                           page.url))

        # process category pages, only when category URLs are configured
        if self.context.get('CATEGORY_URL'):
            urls += self.__process_url_wrapper_elements(
                self.context.get('categories'))

        # process tag pages, sorted alphabetically by tag name
        if self.context.get('TAG_URL'):
            urls += self.__process_url_wrapper_elements(
                sorted(self.context.get('tags'), key=lambda x: x[0].name))

        # process author pages
        if self.context.get('AUTHOR_URL'):
            urls += self.__process_url_wrapper_elements(
                self.context.get('authors'))

        # write the final sitemap file by filling the url nodes into the
        # surrounding XML template
        with codecs_open(os.path.join(self.path_output, 'sitemap.xml'),
                         'w',
                         encoding='utf-8') as fd:
            fd.write(self.xml_wrap % {'SITEURL': self.url_site, 'urls': urls})
Example #51
0
"""
Packaging setup for ledcontroller
"""

# pylint: disable=line-too-long

import os.path
from codecs import open as codecs_open
from setuptools import setup

with codecs_open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

setup(
    name='ledcontroller',
    version='1.3.0',
    description='Controller library for limitlessled/easybulb/milight Wi-Fi LEDs',
    long_description=LONG_DESCRIPTION,
    url='https://github.com/ojarva/python-ledcontroller',
    author='Olli Jarva',
    author_email='*****@*****.**',
    license='BSD',

    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Topic :: Home Automation',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',

        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
Example #52
0
    def get_tld_names(
        cls,
        fail_silently: bool = False,
        retry_count: int = 0
    ) -> Union[Dict[str, Trie], None]:
        """Parse the local TLD names file into a Trie and cache it.

        Reads ``cls.local_path`` (fetching it via ``cls.update_tld_names``
        on the first IOError), builds a :class:`Trie` of suffix names and
        stores it in the module-level ``tld_names`` container keyed by
        ``cls.local_path``.

        :param fail_silently: when True, return ``None`` instead of raising.
        :param retry_count: internal recursion guard; callers leave it at 0.
        :return: the names container, or ``None`` on silent failure.
        """
        # Give up after one re-download attempt to avoid infinite recursion.
        if retry_count > 1:
            if fail_silently:
                return None
            else:
                raise TldIOError

        global tld_names
        _tld_names = tld_names
        # _tld_names = get_tld_names_container()

        # If already loaded, return
        if (
            cls.local_path in _tld_names
            and _tld_names[cls.local_path] is not None
        ):
            return _tld_names

        local_file = None
        try:
            # Load the TLD names file
            if isabs(cls.local_path):
                local_path = cls.local_path
            else:
                local_path = project_dir(cls.local_path)
            local_file = codecs_open(
                local_path,
                'r',
                encoding='utf8'
            )
            trie = Trie()
            trie_add = trie.add  # Performance opt
            # Make a list of it all, strip all garbage
            # Everything after the BEGIN PRIVATE DOMAINS marker is flagged
            # as a private suffix.
            private_section = False

            for line in local_file:
                if '===BEGIN PRIVATE DOMAINS===' in line:
                    private_section = True

                # Puny code TLD names: keep only the ASCII name after '//'
                if '// xn--' in line:
                    line = line.split()[1]

                # Skip comment lines ('/') and blank lines ('\n')
                if line[0] in ('/', '\n'):
                    continue

                trie_add(
                    f'{line.strip()}',
                    private=private_section
                )

            update_tld_names_container(cls.local_path, trie)

            local_file.close()
        except IOError as err:
            # Grab the file
            cls.update_tld_names(
                fail_silently=fail_silently
            )
            # Increment ``retry_count`` in order to avoid infinite loops
            retry_count += 1
            # Run again
            return cls.get_tld_names(
                fail_silently=fail_silently,
                retry_count=retry_count
            )
        except Exception as err:
            if fail_silently:
                return None
            else:
                raise err
        finally:
            # Best-effort close; local_file may be None or already closed.
            try:
                local_file.close()
            except Exception:
                pass

        return _tld_names
Example #53
0
# Default long description
LONG_DESCRIPTION = """

Pyramids Parser
===============

*Natural Language Semantic Extraction*

""".strip()

# Get the long description from the relevant file. First try README.rst,
# then fall back on the default string defined here in this file.
if path.isfile(path.join(HERE, 'README.rst')):
    with codecs_open(path.join(HERE, 'README.rst'),
                     encoding='utf-8',
                     mode='rU') as description_file:
        LONG_DESCRIPTION = description_file.read()

# See https://pythonhosted.org/setuptools/setuptools.html for a full list
# of parameters and their meanings.
setup(
    name='pyramids',
    version=__version__,
    author=__author__,
    author_email='*****@*****.**',
    url='https://github.com/hosford42/pyramids',
    license='MIT',
    platforms=['any'],
    description='Pyramids Parser: Natural Language Semantic Extraction',
    long_description=LONG_DESCRIPTION,
Example #54
0
"""setup.py"""

from codecs import open as codecs_open
from setuptools import setup

with codecs_open('README.md', 'r', 'utf-8') as f:
    __README = f.read()
with codecs_open('HISTORY.md', 'r', 'utf-8') as f:
    __HISTORY = f.read()

setup(
    name='jsonrpcclient',
    version='2.5.2',
    description='Send JSON-RPC requests',
    long_description=__README+'\n\n'+__HISTORY,
    author='Beau Barker',
    author_email='*****@*****.**',
    url='https://github.com/bcb/jsonrpcclient',
    license='MIT',
    packages=['jsonrpcclient'],
    package_data={'jsonrpcclient': ['response-schema.json']},
    include_package_data=True,
    install_requires=['future', 'jsonschema'],
    extras_require={
        'aiohttp': ['aiohttp'],
        'requests': ['requests'],
        'requests_security': ['requests[security]'],
        'tornado': ['tornado'],
        'unittest': ['requests', 'pyzmq', 'tornado', 'responses', \
            'testfixtures', 'mock'],
        'websockets': ['websockets'],
Example #55
0
def get_long_description():
    """Return the full text of README.rst, decoded as UTF-8."""
    with codecs_open('README.rst', encoding='utf-8') as readme:
        text = readme.read()
    return text
Example #56
0
	ntracks = len(intracks)
	for t in intracks:
		tracks[hash(t.tags) % ntracks].append(t.fname)
	
	track0 = choice(intracks).fname
	del intracks
	pls = Playlist(track0, ntracks)
	seen = set([track0])


	print >>stderr, 'Iteration started.'
	try:
		while len(pls) != args.count:
			i = pls.next()
			if not i in tracks:
				continue
			t = tracks[i]
			i = 0
			if len(t) > 1:
				i = pls.next(1) % len(t)
			if not t[i] in seen:
				seen.add(t[i])
				pls.append(t[i])
	except (KeyboardInterrupt, StopIteration):
		pass
	print >>stderr, 'Writing playlist file.'
	with closing(codecs_open(args.playlist, 'w', 'utf8')) as f:
		for t in pls.tracks:
			print >>f, t
Example #57
0
    return True


argparser = ArgumentParser(description='Run JSON5 parser tests')
argparser.add_argument('input', type=Path)
argparser.add_argument('output', nargs='?', type=Path)

if __name__ == '__main__':
    basicConfig(level=DEBUG)
    logger = getLogger(__name__)

    args = argparser.parse_args()
    try:
        # open() does not work with Paths in Python 3.5
        with codecs_open(str(args.input.resolve()), 'r', 'UTF-8') as f:
            data = f.read()
    except Exception:
        logger.error('Could not even read file: %s', args.input, exc_info=True)
        raise SystemExit(-1)

    try:
        obj = decode(data)
    except Exception:
        logger.error('Could not parse content: %s', args.input)
        raise SystemExit(1)

    try:
        json_obj = loads(data)
    except Exception:
        pass