Example #1
def mkdir(client_pwd, args):
    if len(args) != 1:
        return "Error not enough arguments! Use: mkdir <argument>"

    path = os.path.normpath(format_path(client_pwd) + args[0])
    real_path = os.path.normpath(fake_root + path)

    if not os.path.exists(real_path):
        try:
            os.makedirs(real_path)
            return ""
        except OSError:
            return "Unable to create directory!"
    else:
        return "Directory " + format_path(args[0]) + " already exists!"
Example #2
def cp(client_pwd, connected_storages, args):
    if len(args) != 4:
        # the four server-side arguments below are parsed from the
        # client's "cp <SOURCE> <DEST>" command
        return "Wrong arguments. Use cp <SOURCE> <DEST>"
    remote_path = args[0]
    file_name = args[1]
    file_size = args[2]
    chunks = args[3]

    if remote_path == '.':
        remote_address = format_path(client_pwd) + file_name
    else:
        remote_address = (format_path(remote_path) if remote_path.startswith('/') else "/" + format_path(remote_path)) \
                         + file_name

    return generate_ip_pairs(remote_address, file_size, chunks,
                             connected_storages)
Example #3
def plot_closing_adj(path_to_csv):
    '''plots the daily adjusted closing price vs. time'''
    data = pd.read_csv(format_path(path_to_csv), index_col='date')
    print('plotting data for ' + path_to_csv + '...')
    print('data dimensions ' + str(data.shape))
    plt.plot(data.index.values, data['adjusted'].values)
    plt.show()
Example #4
def init_files(path):
    new_path = format_path(path)
    headers_with_token = {'Authorization': 'Bearer ' + cache['access_token']}
    url = f'https://graph.microsoft.com/v1.0/me/drive/root{new_path}children'
    r = requests.get(url, headers=headers_with_token)
    # parse the JSON response instead of eval()-ing raw text
    file_data = r.json()
    if "error" in file_data:
        print(file_data['error']['message'])
    else:
        values = {}
        for value in file_data['value']:
            if value['name'].lower().strip() == 'readme.md':
                readme = requests.get(value['@microsoft.graph.downloadUrl']).text
                values['readme.md'] = render_markdown(readme)
            elif value['name'].lower().strip() == 'head.md':
                readme = requests.get(value['@microsoft.graph.downloadUrl']).text
                values['head.md'] = render_markdown(readme)
            else:
                if 'folder' in value:
                    folder = {
                        'time': format_time(value['lastModifiedDateTime']),
                        'size': format_size(value['size'])
                    }
                    values[value['name']] = folder
                    init_files(path.rstrip('/') + '/' + value['name'])
                elif 'file' in value:
                    file = {
                        'time': format_time(value['lastModifiedDateTime']),
                        'size': format_size(value['size']),
                        'url': value['@microsoft.graph.downloadUrl']
                    }
                    values[value['name']] = file

        cache_tmp['files'][path] = values
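Parsing the Graph response with requests' built-in r.json() (rather than eval) matters: eval executes whatever text the server returns and fails outright on JSON literals such as true and null. A minimal illustration with a made-up response body:

import json

body = '{"value": [], "ok": true}'
# eval(body) would raise NameError ('true' is not defined) and would
# happily execute any Python the server returned instead
data = json.loads(body)   # this is what requests' r.json() does
print(data['ok'])         # -> True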
Example #5
def evaluate_batch(symbols_file, data_path):
    '''prep data for evaluating'''
    symbols = []
    with open(format_path(symbols_file), 'r') as data:
        read_data = data.read()
        symbols = str(read_data).split()

    for symbol in symbols:
        test_data = pd.read_csv(format_path(data_path + '/' + symbol + '.csv'),
                                index_col='date')

        model_dir = format_path('output/models/' + symbol)

        evaluate(symbol, model_dir, test_data)

    print('batch evaluation finished')
Example #6
    def _make_mvn_command_template(self, maven_config):
        """
        Construye la plantilla del comando en función de la config.
        Esta plantilla acepta los parámetros folder y goal
        """

        profiles = ''
        # TODO: fix the case where there is only a single value (forcelist)
        if maven_config.get('profiles'):
            profiles = ' '.join(['-P', ','.join(maven_config['profiles'])])

        settings = ''
        if maven_config.get('settings_file'):
            settings = ' '.join(['-s', format_path(maven_config['settings_file'])])

        pom_file = ' '.join(['-f', '$folder' + self.pom_filename])

        flags_array = ['-B']
        if maven_config.get('quiet'):
            flags_array.append('-q')
        if maven_config.get('skip_tests'):
            flags_array.append('-DskipTests')
        if maven_config.get('flags'):
            flags_array.extend(maven_config['flags'])
        flags = ' '.join(flags_array)

        base_template_string = ' '.join(['mvn', flags, profiles, settings, pom_file, '$goal'])
        return Template(base_template_string)
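The returned value is a standard-library string.Template, so the caller can later substitute the two placeholders the docstring mentions. A usage sketch with made-up folder and goal values (the real template string depends on the config and on self.pom_filename):

from string import Template

tpl = Template('mvn -B -q -f $folder/pom.xml $goal')
cmd = tpl.substitute(folder='/work/my-project', goal='clean install')
print(cmd)   # mvn -B -q -f /work/my-project/pom.xml clean install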
Example #7
def stat(client_pwd, args):
    if len(args) != 1:
        return "Error not enough arguments! Use: stat <argument>"

    path = os.path.normpath(format_path(client_pwd) + args[0])
    real_path = os.path.normpath(fake_root + path)

    if not os.path.exists(real_path):
        return "Error path " + path + "not exists!"
    else:
        if os.path.isdir(real_path):
            try:
                listdir = os.listdir(real_path)
                return "Directory " + path + " contains " + str(
                    len(listdir)) + " items"
            except OSError:
                return "Unable to get directory stats!"

        if os.path.isfile(real_path):
            try:
                # metadata line format: <path>|<size>|<chunks>
                with open(real_path) as f:
                    file_data = f.readline().split("|")
                return "File size: " + file_data[1] + os.linesep + \
                       "Divided into: " + file_data[2] + " chunks"
            except (OSError, IndexError):
                return "Unable to get file stats!"
Example #8
	def filterOptimizeFiles(self):
		self.filter_files = {}

		if os.path.isfile(self.path):
			head, tail = os.path.split(self.path)
			self.filter_files[os.path.normpath(self.path)] = tail
			self.path = head
			return

		for root, dirs, files in scandir.walk(self.path, topdown=True):
			dirs[:] = [d for d in dirs if utils.format_path(root, d) not in self.ignore_dirs]

			for file in files:
				if file.endswith('.py'):
					fullpath = utils.format_path(root, file)
					relpath = os.path.relpath(fullpath, self.path)
					self.filter_files[fullpath] = relpath
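The slice assignment dirs[:] is what makes the pruning work: os.walk (and the scandir.walk backport used above) re-reads the same list object to decide which subdirectories to descend into, so the list must be mutated in place rather than rebound. A self-contained sketch:

import os

for root, dirs, files in os.walk('.', topdown=True):
    # in-place filter: walk() will skip the removed directories
    dirs[:] = [d for d in dirs if d not in ('.git', '__pycache__')]
    # "dirs = [...]" would rebind the name and prune nothing
    for name in files:
        print(os.path.join(root, name))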
Example #9
    def formatPath(self, root):
        # rebase every configured path list onto the given root
        for attr in ('IGNORE_DIRS', 'IGNORE_FILES', 'INLINE_CONST',
                     'CONFIG_INLINE_CONST', 'INLINE_CONST_FILES'):
            setattr(self.config, attr, [
                utils.format_path(root, _path)
                for _path in getattr(self.config, attr)
            ])

        routines = []
        for path, sys_path in self.config.SYS_PATH_ROUTINE:
            routines.append(
                (utils.format_path(root, path),
                 [utils.format_path(root, _path) for _path in sys_path]))
        self.config.SYS_PATH_ROUTINE = routines
Example #10
def train_batch(symbols_file, data_path, export_dir):
    '''prep data for training'''
    # read from symbols file
    symbols = []
    with open(format_path(symbols_file), 'r') as data:
        read_data = data.read()
        symbols = str(read_data).split()

    for symbol in symbols:
        print('training neural network model for ' + symbol)
        train_data = pd.read_csv(format_path(data_path + '/train/' + symbol + '.csv'), index_col='date')
        test_data = pd.read_csv(format_path(data_path + '/test/' + symbol + '.csv'), index_col='date')

        model_dir = format_path(export_dir + '/' + symbol)
        remove_dir(model_dir)
        train(train_data, test_data, format_path(model_dir))

        print('training finished for ' + symbol)
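Both train_batch and evaluate_batch read the symbol list with a bare split(), which splits on any run of whitespace, so spaces, tabs and newlines in the symbols file are interchangeable. A quick demonstration:

# a hypothetical symbols file may mix separators freely:
#   AAPL MSFT
#   GOOG    TSLA
print('AAPL MSFT\nGOOG\tTSLA'.split())   # ['AAPL', 'MSFT', 'GOOG', 'TSLA']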
Example #11
def cp(response, temp_dir):
    original_file = response
    for i, line in enumerate(original_file.splitlines()):
        if i == 0:
            filepath = line.split('|')[0]
        else:
            # send each chunk to storage
            # (the chunk layout comes from chunks_creator)
            parts = line.split('|')
            chunk_name = format_path(temp_dir) + os.path.basename(
                filepath) + '/chunk_' + str(i) + '.txt'
            chunk_data = format_path(filepath) + 'chunk_' + str(
                i) + '.txt' + os.linesep
            with open(chunk_name, 'r') as chunk:
                chunk_data += chunk.read() + os.linesep
            send_file_to_storage(parts[0], chunk_data)
            # send a replica of the chunk to a second storage, if assigned
            if parts[1] != '':
                send_file_to_storage(parts[1], chunk_data)
Example #12
def ls(client_pwd, args):
    if len(args) != 1:
        return "Error not enough arguments! Use: ls [<arguments>]"

    path = os.path.normpath(format_path(client_pwd) + args[0])
    real_path = os.path.normpath(fake_root + path)
    try:
        listdir = os.listdir(real_path)
    except OSError:
        return "Error: incorrect path!"
    return os.linesep.join(str(name) for name in listdir)
Example #13
def mkdir(path):
    path = os.path.normpath(path)
    real_path = os.path.normpath(fake_root + path)

    if not os.path.exists(real_path):
        try:
            os.makedirs(real_path)
            return ""
        except OSError:
            return "Unable to create directory!"
    else:
        return "Directory " + format_path(path) + " already exists!"
Example #14
def cd(client_pwd, args):
    if len(args) != 1:
        return "Error not enough arguments! Use: cd <argument>"

    path = os.path.normpath(format_path(client_pwd) + args[0])
    real_path = os.path.normpath(fake_root + path)

    if os.path.exists(real_path):
        os.chdir(real_path)
        return path
    else:
        return client_pwd
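All of the shell-style handlers in these examples (cd, ls, mkdir, rm, stat, cat) share one translation scheme: the client sees virtual absolute paths, and the server maps them under a module-level fake_root directory. A condensed sketch of that mapping, reusing the hypothetical format_path from after Example #1 (fake_root's value is likewise an assumption):

import os

fake_root = '/srv/dfs_root'   # hypothetical server-side root directory

def to_real_path(client_pwd, arg):
    # the same two-step translation the handlers above perform
    virtual = os.path.normpath(format_path(client_pwd) + arg)
    return os.path.normpath(fake_root + virtual)

# to_real_path('/docs', 'notes.txt') -> '/srv/dfs_root/docs/notes.txt'

Because the virtual path is absolute, normpath clamps any run of '..' at '/', so the result stays under fake_root as long as client_pwd really is an absolute virtual path.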
Example #15
def copy_config():
    config = utils.get_json_file_data('config.json')

    planConfigRootPath = utils.format_path(
        os.path.join(os.getcwd(), config['planConfigDir']))
    serverSrcPath = utils.format_path(
        os.path.join(planConfigRootPath, config['serverOnlyDirName']))
    commonSrcPath = utils.format_path(
        os.path.join(planConfigRootPath, config['commonDirName']))
    activitiesSrcPath = utils.format_path(
        os.path.join(planConfigRootPath, config['activitiesDirName']))

    dstPath = utils.format_path(
        os.path.join(os.getcwd(), config['serverConfigDir']))

    filterList = []
    for filterFile in config['serverNotCopy']:
        filterPath = utils.format_path(
            os.path.join(planConfigRootPath, filterFile))
        filterList.append(filterPath)

    utils.logger_info("更新配置......")
    os.system("svn update " + (planConfigRootPath).encode('gbk'))
    utils.logger_info("配置更新完成,准备拷贝!")

    utils.logger("配置源目录:")
    utils.logger(serverSrcPath.encode('utf-8'))
    utils.logger(commonSrcPath.encode('utf-8'))
    utils.logger(activitiesSrcPath.encode('utf-8'))
    utils.logger("拷贝到:" + dstPath.encode('utf-8'))
    utils.cover_copy_files(commonSrcPath, dstPath, filterList)
    utils.cover_copy_files(serverSrcPath, dstPath, filterList)
    utils.cover_copy_files(activitiesSrcPath, dstPath, filterList)
    utils.logger_info('配置拷贝结束!')
Example #17
def preprocess_batch(input_path, output_path, train_ratio):
    '''perform preprocessing on all input data csvs'''
    start = time.time()
    files = get_filename_list(input_path, 'csv')

    for file in files:
        symbol = file.split('.')[0]

        print("preprocessing " + symbol)

        data = pd.read_csv(format_path(input_path + '/' + file), index_col='date')

        train_data, test_data = preprocess(data, train_ratio)

        formatted_output = format_path(output_path)
        make_dir_if_not_exists(formatted_output + '/train')
        make_dir_if_not_exists(formatted_output + '/test')
        train_data.to_csv(formatted_output + '/train' + '/' + symbol + '.csv')
        test_data.to_csv(formatted_output + '/test' + '/' + symbol + '.csv')
        print('saved csv files to ' + formatted_output + '{train, test}/' + symbol + '.csv')

    print("preprocessing complete")
    elapsed = time.time() - start
    print('time elapsed: ' + str(round(elapsed, 2)) + " seconds")
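preprocess_batch leans on two helpers not shown here, get_filename_list and make_dir_if_not_exists. The latter presumably wraps the standard idiom; a plausible one-liner, offered purely as an assumption:

import os

def make_dir_if_not_exists(path):
    # exist_ok makes makedirs a no-op when the directory already exists
    os.makedirs(path, exist_ok=True)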
Example #18
def cat(client_pwd, connected_storages, args):
    if len(args) != 1:
        return "Error: wrong number of arguments! Use: cat <argument>"
    # file in the pwd directory
    filepath = os.path.normpath(format_path(client_pwd) + args[0])
    real_path = os.path.normpath(fake_root + filepath)
    if not os.path.exists(real_path):
        return "Error: path %s does not exist!" % filepath
    else:
        with open(real_path, 'r') as read_file:
            full_file = ''
            lcount = 0
            for i, line in enumerate(read_file):
                print('%s %s' % (i, line))
                if i == 0:
                    # header line format: <remote_path>|<size>|<chunks>
                    remote_path = line.split('|')[0]
                    chunks = line.split('|')[2]
                    print("chunks =", chunks)
                if i > 0:
                    chunk_path = format_path(remote_path) + 'chunk_' + str(
                        i) + '.txt'
                    # ask the primary storage first, then the replica
                    if ip_in_list(line.split('|')[0], connected_storages):
                        full_file += request_chunk_from_storage(
                            line.split('|')[0], chunk_path)
                    elif ip_in_list(line.split('|')[1], connected_storages):
                        full_file += request_chunk_from_storage(
                            line.split('|')[1], chunk_path)
                    else:
                        return "Some chunk of file %s does not exist" % filepath
                    lcount += 1
                    print(lcount)

        if lcount != int(chunks):
            return "Some chunk of file %s does not exist" % filepath
        else:
            return full_file
Example #19
    def get_build_command(self, project_folder):
        base_command = "svn"
        result_command = ""

        project_folder = format_path(project_folder)

        if 'revert' in self.svn_config:
            result_command += base_command + " revert -R " + project_folder + "; "

        params = []
        if 'force_update' in self.svn_config:
            params.append('update --force')
        elif 'update' in self.svn_config:
            params.append('update')

        result_command += " ".join([base_command, *params, project_folder])
        return result_command
Example #20
def rm(client_pwd, args):
    if len(args) != 1:
        return "Wrong number of arguments for rm"

    path = os.path.normpath(format_path(client_pwd) + args[0])
    real_path = os.path.normpath(fake_root + path)

    if not os.path.exists(real_path):
        return "Error: path " + path + " does not exist!"
    else:
        if os.path.isdir(real_path):
            try:
                shutil.rmtree(real_path)
                return ""
            except OSError:
                return "Unable to remove directory!"

        if os.path.isfile(real_path):
            try:
                os.remove(real_path)
                return ""
            except OSError:
                return "Unable to remove file!"
Example #21
def create_chunks(file_object, temp_dir, chunk_size=1024):
    # Because a single word must not be split across chunks, the number of
    # bytes read may shrink depending on the last word of the previous chunk.
    work_chunk_size = chunk_size
    with open(file_object, "rb") as in_file:
        # counter for output files
        num = 1
        # last word carried over from the previous chunk (if it was split)
        last_word = b''
        while True:
            chunk = last_word + in_file.read(work_chunk_size)
            if not chunk:
                break  # end of file
            # the next byte tells whether the last word in the chunk is complete
            ch = in_file.read(1)
            if ch in [b' ', b'\n', b'\r', b'\t', b'']:
                last_word = ch
                work_chunk_size = chunk_size - 1
            else:
                last_word = re.split(b' |\r\n|\n|\t', chunk)[-1]
                if last_word != chunk:
                    chunk = chunk[0:len(chunk) - len(last_word)]
                    last_word += ch  # carry the next byte over as well
                    work_chunk_size = chunk_size - len(last_word) - 1
                else:
                    last_word = ch
                    work_chunk_size = chunk_size
            # create the chunk in the temp directory under a path derived
            # from the original file name
            chunk_name = format_path(temp_dir) + os.path.basename(
                file_object) + '/chunk_' + str(num) + '.txt'
            if not os.path.exists(os.path.dirname(chunk_name)):
                os.makedirs(os.path.dirname(chunk_name))
            with open(chunk_name, "wb") as out_file:
                out_file.write(chunk)
                num = num + 1
    return num - 1
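Since the boundary logic only moves whole words between neighbouring chunks, concatenating the chunks in order reproduces the original file. A small round-trip sketch under that assumption, reusing create_chunks and the hypothetical format_path from above (file and directory names are made up):

total = create_chunks('report.txt', '/tmp/chunks/', chunk_size=1024)
parts = []
for n in range(1, total + 1):
    with open('/tmp/chunks/report.txt/chunk_%d.txt' % n, 'rb') as f:
        parts.append(f.read())
restored = b''.join(parts)   # byte-for-byte the original report.txt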
Example #22
    #if fext in compression_name_list:
    #    compress_flag=0

    cmd = ''
    if compress_flag <= 0:
        logger.info("Uncompress file %s" % input_file)
        target_dir = '.'
        if len(args) > 1:
            if os.path.isfile(args[1]):
                logger.error(
                    '"%s" is a file but should be a directory, please check'
                    % args[1])
                sys.exit()
            target_dir = args[1]
        else:
            if compress_flag == -1:
                target_dir = fname
        target_dir = utils.format_path(target_dir)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)  #create a directory using the filename

        fext = fext.lower()
        if fext in tar_name_list:
            compr = get_compr_app_cmd(fext)
            cmd += 'tar %sxvf "%s" -C "%s"' % (compr, input_file, target_dir)
        elif fext in zip_name_list:
            cmd += 'unzip "%s" -d "%s"' % (input_file, target_dir)

    elif compress_flag > 0:
        logger.info("Compress file %s" % input_file)
        #if len(extension)==0:
        #extension='.zip'#default compression extension
        if compress_flag == 2:
Example #23
    def get_build_command(self, project_folder):
        return self.mvn_command_template.substitute(folder=format_path(project_folder),
                                                    goal=self.command_maven_goal)
Example #26
def fetch(symbols_file, indicators_file, output_path):
    '''fetches stock data combined with technical indicators, output as csv'''

    # read from symbols file
    stocks = []
    with open(utils.format_path(symbols_file), 'r') as data:
        read_data = data.read()
        stocks = str(read_data).split()

    # read from indicators file
    indicators = []
    with open(utils.format_path(indicators_file), 'r') as data:
        read_data = data.read()
        indicators = str(read_data).split()

    stocks_config = {
        'function': constants.TIME_SERIES_DAILY_ADJUSTED,
        'output_size': constants.OUTPUTSIZE_FULL,
        'data_type': constants.DATATYPE_JSON,
        'api_key': constants.API_KEY
    }

    indicators_config = {
        'interval': constants.INTERVAL,
        'time_period': constants.TIME_PERIOD,
        'series_type': constants.SERIES_TYPE,
        'api_key': constants.API_KEY
    }

    for stock in stocks:
        start = time.time()

        stock_data = fetch_stock.fetch(stock, stocks_config)

        time.sleep(1)

        dfs = []
        dfs.append(stock_data)
        for indicator in indicators:
            indicator_data = fetch_indicators.fetch(indicator, stock,
                                                    indicators_config)

            time.sleep(1)

            dfs.append(indicator_data)

        stock_indicators_joined = reduce(
            lambda left, right: pd.merge(
                left, right, left_index=True, right_index=True, how='outer'),
            dfs)

        stock_indicators_joined.index.name = 'date'

        # print(stock_indicators_joined)

        print('fetched and joined data for ' + stock)

        formatted_output_path = utils.format_path(output_path)
        utils.make_dir_if_not_exists(output_path)
        stock_indicators_joined.to_csv(formatted_output_path + '/' + stock +
                                       '.csv')
        print('saved csv file to ' + formatted_output_path + '/' + stock +
              '.csv')

        elapsed = time.time() - start
        print('time elapsed: ' + str(round(elapsed, 2)) + " seconds")
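The reduce call is the heart of the join: it folds the list of per-indicator DataFrames into one table by repeatedly outer-merging on the shared date index. A minimal standalone illustration of the same pattern:

from functools import reduce
import pandas as pd

a = pd.DataFrame({'close': [1.0, 2.0]}, index=['2020-01-01', '2020-01-02'])
b = pd.DataFrame({'sma': [1.5]}, index=['2020-01-02'])
joined = reduce(
    lambda left, right: pd.merge(left, right, left_index=True,
                                 right_index=True, how='outer'),
    [a, b])
print(joined)   # one row per date; missing indicator values appear as NaN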