Ejemplo n.º 1
0
def retrieve_feature_by_open_face(facial_image_path, feature_file_path):
    """Retrieve the deep feature by using open face.

    :param facial_image_path: the path of the facial image
    :type facial_image_path: string
    :param feature_file_path: the path of the feature file
    :type feature_file_path: string
    :return: the deep feature, or None on any failure
    :rtype: numpy array
    """

    try:
        # Read feature directly from file
        if os.path.isfile(feature_file_path):
            return common.read_from_file(feature_file_path)

        # Retrieve feature. Explicit checks replace asserts, which are
        # stripped when Python runs with -O.
        if not os.path.isfile(facial_image_path):
            return None
        facial_image_in_BGR = cv2.imread(facial_image_path)
        if facial_image_in_BGR is None:
            # cv2.imread returns None (no exception) on unreadable images.
            return None
        facial_image_in_BGR = cv2.resize(facial_image_in_BGR,
                                         dsize=(args.imgDim, args.imgDim))
        facial_image_in_RGB = cv2.cvtColor(facial_image_in_BGR,
                                           cv2.COLOR_BGR2RGB)
        feature = net.forward(facial_image_in_RGB)

        # Successful case. Save feature to file.
        if feature is None:
            return None
        common.write_to_file(feature_file_path, feature)
        return feature
    except Exception:
        # Failure case. Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        return None
Ejemplo n.º 2
0
def create_out_file():
  """Seed the ranked output file with its CSV header row only."""

  header_line = 'Rank Type,Rank,MA Type,MA Period,Investment,Wealth,Absolute Return,Annualized Return,Sharpe'
  file_data = [header_line]
  ranked_file_path = os.path.join(output_dir, output_file)
  common.write_to_file(ranked_file_path, file_data)
Ejemplo n.º 3
0
def stage_flag_20():
    """Write flag 20 to its file and restrict it to owner-read only."""
    common.write_to_file(common.FLAG_FILE_20, get_formatted_flag_line(20))

    # Restrict read access to root user only (specifically, restrict read
    # access to the owning user, but since the script is being executed as
    # root, the owner of this file will be the root user).
    # ``0o400`` replaces the Python 2-only literal ``0400``, which is a
    # SyntaxError on Python 3; the numeric value is identical.
    os.chmod(common.FLAG_FILE_20, 0o400)
Ejemplo n.º 4
0
def persist_rollback_info(deployments,
                          rollback_filename_template="rollback-info_",
                          rollback_file_dir=None):
    """Write name, runtime_name and server group of all enabled
    deployments to be replaced to a file named rollback-info_<timestamp>.

    :param deployments: iterable of objects exposing ``name``,
        ``runtime_name`` and ``server_group`` attributes
    :param rollback_filename_template: prefix for the generated file name
    :param rollback_file_dir: target directory; defaults to the directory
        containing this script
    :return: path of the written file, or None when *deployments* is empty
    """
    if not deployments:
        return None

    directory = rollback_file_dir if rollback_file_dir else \
        os.path.dirname(os.path.abspath(__file__))

    # Millisecond timestamp keeps successive rollback files unique.
    # os.path.join replaces manual separator concatenation.
    rollback_info_file = os.path.join(
        directory,
        rollback_filename_template + str(int(round(time.time() * 1000))))

    # The literal string "None" is written when a deployment has no
    # server group, matching the original formatting behavior.
    deployment_line_template = "{0} {1} {2}\n"
    lines = [
        deployment_line_template.format(
            deployment.name,
            deployment.runtime_name,
            deployment.server_group.name
            if deployment.server_group is not None else None)
        for deployment in deployments
    ]

    # Single join instead of repeated string concatenation in a loop.
    common.write_to_file(rollback_info_file, "".join(lines))

    return rollback_info_file
Ejemplo n.º 5
0
def retrieve_feature_by_vgg_face(facial_image_path, feature_file_path):
    """Retrieve the deep feature by using vgg face.

    :param facial_image_path: the path of the facial image
    :type facial_image_path: string
    :param feature_file_path: the path of the feature file
    :type feature_file_path: string
    :return: the deep feature, or None on any failure
    :rtype: numpy array
    """

    try:
        # Read feature directly from file
        if os.path.isfile(feature_file_path):
            return common.read_from_file(feature_file_path)

        # Retrieve feature. Explicit checks replace asserts, which are
        # stripped when Python runs with -O.
        if not os.path.isfile(facial_image_path):
            return None
        facial_image = cv2.imread(facial_image_path)
        if facial_image is None:
            # cv2.imread returns None (no exception) on unreadable images.
            return None
        facial_image = cv2.resize(facial_image,
                                  dsize=(common.VGG_FACE_IMAGE_SIZE,
                                         common.VGG_FACE_IMAGE_SIZE))
        facial_image = facial_image.astype(np.float32)
        # Forward pass is run for its side effect; the feature is read
        # from the "fc7" blob afterwards.
        _ = net.predict([facial_image], oversample=False).ravel()
        feature = net.blobs["fc7"].data[0]

        # Successful case. Save feature to file.
        if feature is None:
            return None
        common.write_to_file(feature_file_path, feature)
        return feature
    except Exception:
        # Failure case. Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        return None
Ejemplo n.º 6
0
def create_out_file():
  """Seed the ranked output file with its CSV header row only."""

  header_line = 'Type,Rank,Investment,Wealth,AbsoluteReturn,AnnualizedReturn,Sharpe'
  file_data = [header_line]
  ranked_file_path = os.path.join(output_dir, output_file)
  common.write_to_file(ranked_file_path, file_data)
Ejemplo n.º 7
0
def retrieve_feature_by_vgg_face(facial_image_path, feature_file_path):
    """Retrieve the deep feature by using vgg face.

    :param facial_image_path: the path of the facial image
    :type facial_image_path: string
    :param feature_file_path: the path of the feature file
    :type feature_file_path: string
    :return: the deep feature, or None on any failure
    :rtype: numpy array
    """

    try:
        # Read feature directly from file
        if os.path.isfile(feature_file_path):
            return common.read_from_file(feature_file_path)

        # Retrieve feature. Explicit checks replace asserts, which are
        # stripped when Python runs with -O.
        if not os.path.isfile(facial_image_path):
            return None
        facial_image = cv2.imread(facial_image_path)
        if facial_image is None:
            # cv2.imread returns None (no exception) on unreadable images.
            return None
        facial_image = cv2.resize(facial_image, dsize=(common.VGG_FACE_IMAGE_SIZE, common.VGG_FACE_IMAGE_SIZE))
        facial_image = facial_image.astype(np.float32)
        # Forward pass is run for its side effect; the feature is read
        # from the "fc7" blob afterwards.
        _ = net.predict([facial_image], oversample=False).ravel()
        feature = net.blobs["fc7"].data[0]

        # Successful case. Save feature to file.
        if feature is None:
            return None
        common.write_to_file(feature_file_path, feature)
        return feature
    except Exception:
        # Failure case. Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        return None
Ejemplo n.º 8
0
def retrieve_feature_by_open_face(facial_image_path, feature_file_path):
    """Retrieve the deep feature by using open face.

    :param facial_image_path: the path of the facial image
    :type facial_image_path: string
    :param feature_file_path: the path of the feature file
    :type feature_file_path: string
    :return: the deep feature, or None on any failure
    :rtype: numpy array
    """

    try:
        # Read feature directly from file
        if os.path.isfile(feature_file_path):
            return common.read_from_file(feature_file_path)

        # Retrieve feature. Explicit checks replace asserts, which are
        # stripped when Python runs with -O.
        if not os.path.isfile(facial_image_path):
            return None
        facial_image_in_BGR = cv2.imread(facial_image_path)
        if facial_image_in_BGR is None:
            # cv2.imread returns None (no exception) on unreadable images.
            return None
        facial_image_in_BGR = cv2.resize(facial_image_in_BGR, dsize=(args.imgDim, args.imgDim))
        facial_image_in_RGB = cv2.cvtColor(facial_image_in_BGR, cv2.COLOR_BGR2RGB)
        feature = net.forward(facial_image_in_RGB)

        # Successful case. Save feature to file.
        if feature is None:
            return None
        common.write_to_file(feature_file_path, feature)
        return feature
    except Exception:
        # Failure case. Narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        return None
Ejemplo n.º 9
0
def stage_flag_7():
    """Inject flag 7 two-thirds of the way into the source file's lines."""
    with open(common.FLAG_7_SOURCE_FILE, 'r') as f:
        file_lines = f.read().splitlines()

    num_lines = len(file_lines)
    # Floor division: on Python 3 the original ``/`` produces a float and
    # list.insert() raises TypeError on non-integer indices. ``//`` keeps
    # the Python 2 integer-division behavior on both versions.
    flag_injection_location = (num_lines // 3) * 2
    file_lines.insert(flag_injection_location, get_formatted_flag_line(7))
    common.write_to_file(common.FLAG_FILE_7, "\n".join(file_lines))
Ejemplo n.º 10
0
def run():
  """Build output/summary.csv from the per-category output files, the
  ranked strategy results and the equal-weighted strategy results.

  Reads module-level globals: out_file_dict (header -> output file name)
  and sort_val (ordering key for the categories).
  """

  file_data = []
  header_line = 'Category,Inv Period,Return,Std Dev,Sharpe'
  file_data.append(header_line)

  # One summary row per category file: mean investment period, mean and
  # stdev of returns, mean sharpe.
  for header in sorted(out_file_dict, key=sort_val):
    
    out_file = out_file_dict[header]
    out_file_path = os.path.join('output', out_file)
    out_data = common.read_from_file(out_file_path)
    # Drop the CSV header row.
    del out_data[0]
    
    inv_period_data = []
    ret_data = []
    sharpe_data = []
    
    for r in out_data:
      
      # Column layout assumed: [.., .., inv_period, .., .., return, sharpe]
      # — TODO confirm against the writer of these files.
      row_data = r.split(',')
      inv_period_data.append(float(row_data[2]))
      ret_data.append(float(row_data[5]))
      sharpe_data.append(float(row_data[6]))
    
    inv_period = numpy.mean(inv_period_data)
    ret = numpy.mean(ret_data)
    stdev = numpy.std(ret_data)
    sharpe = numpy.mean(sharpe_data)
    
    line_data = header + ',' + str(inv_period) + ',' + str(ret) + ',' \
      + str(stdev) + ',' + str(sharpe)
    file_data.append(line_data)

  # Append every row of the ranked-strategy results; Std Dev is left empty.
  rank_file = os.path.join('output', 'ranked.csv')
  rank_data = common.read_from_file(rank_file)
  del rank_data[0]
  
  for r in rank_data:
  
    row_data = r.split(',')
    category = row_data[0].capitalize() + ' ' + row_data[1]
    ret = row_data[5]
    sharpe = row_data[6]
    
    line_data = category + ',1.0,' + ret + ',,' + sharpe
    file_data.append(line_data)
    
  # Append the single equal-weighted strategy row; Std Dev is left empty.
  equalWt_file = os.path.join('output', 'equalWt.csv')
  equalWt_data = common.read_from_file(equalWt_file)
  del equalWt_data[0]
  row_data = equalWt_data[0].split(',')
  
  line_data = 'Equal Weighted,1.0,' + row_data[3] + ',,' + row_data[4]
  file_data.append(line_data)
    
  summary_file = os.path.join('output', 'summary.csv')
  common.write_to_file(summary_file, file_data)
Ejemplo n.º 11
0
def main():
    """Solve chain-mass NMPC QPs over a small parameter grid and write
    the resulting test-data header to the file named in argv[1]."""
    test_data_vector = []
    for num_masses in range(1, 4):
        for num_intervals in (10, 20):
            test_data_vector.append(
                solve_chain_mass_nmpc_qp(num_masses=num_masses,
                                         num_intervals=num_intervals))

    header = common.get_test_data_header(test_data_vector)
    # NOTE(review): arguments here are (data, path); other call sites of
    # common.write_to_file pass (path, data) — verify this is a different
    # ``common`` module or an intentional signature.
    common.write_to_file(header, sys.argv[1])
Ejemplo n.º 12
0
def main():
    """Solve hanging-chain QPs over a grid of mass counts, with and
    without constraints, and write the test-data header to argv[1]."""
    test_data_vector = []
    for num_masses in range(5, 100, 5):
        for use_contraints in (True, False):
            test_data_vector.append(
                solve_hanging_chain_qp(num_masses=num_masses,
                                       use_contraints=use_contraints))

    header = common.get_test_data_header(test_data_vector)
    # NOTE(review): arguments here are (data, path); other call sites of
    # common.write_to_file pass (path, data) — verify this is a different
    # ``common`` module or an intentional signature.
    common.write_to_file(header, sys.argv[1])
Ejemplo n.º 13
0
def remove_from_list(list, file):
    """Remove the entry matching *list* from *file*, if present.

    NOTE(review): the parameter names shadow the builtins ``list`` and
    ``file``; kept unchanged because they are part of the public signature.
    """
    index = find_list(list, file)
    if index < 0:
        return
    lines = read_from_file(file).split('\n')
    lines.pop(index)
    # Rebuild the file, dropping empty lines and re-adding newlines.
    rebuilt = ''.join(line + '\n' for line in lines if len(line) > 0)
    write_to_file(file, rebuilt)
Ejemplo n.º 14
0
def stage_flag_8():
    """Create the flag-8 decoy files and plant the real flag content."""
    common.mkdir(common.FLAG_8_DIRECTORY)
    filenames_dict = common.get_flag_8_filenames()

    # Every file starts out as a decoy.
    for filepath in filenames_dict['all_files']:
        common.write_to_file(filepath, "duck\n")

    # Overwrite the contents of the flag file afterwards so it alone
    # carries the flag.
    common.write_to_file(filenames_dict['flag_file'],
                         "goose\n" + get_formatted_flag_line(8))
Ejemplo n.º 15
0
def unsubscribe(name, mode):
    """Remove the subscription for (*name*, *mode*) from the file, if any."""
    index = subscription_index(name, mode)
    if index < 0:
        return
    lines = read_from_file(SUBSCRIPTION_FILE).split('\n')
    lines.pop(index)
    # Rebuild the file, dropping empty lines and re-adding newlines.
    rebuilt = ''.join(line + '\n' for line in lines if len(line) > 0)
    write_to_file(SUBSCRIPTION_FILE, rebuilt)
Ejemplo n.º 16
0
def query_influxdb(ctx, query, img_handle_queue, condition):
    """Send *query* to the influx connector service over the message bus,
    enqueue one column->value dict per result row onto *img_handle_queue*,
    and dump the raw response data to a timestamped .dat file.

    :param ctx: config client context supplying message-bus settings
    :param query: influx query string, sent as the request 'command'
    :param img_handle_queue: queue receiving one dict per result row
    :param condition: threading.Condition used to wake waiting consumers
    """
    try:
        logger.info(f'Initializing message bus context')
        client_ctx = ctx.get_client_by_index(0)
        config = client_ctx.get_msgbus_config()
        interface_value = client_ctx.get_interface_value("Name")
        msgbus = mb.MsgbusContext(config)
        logger.info(
            'Initializing service for topic \'{"influxconnector_service"}\'')
        service = msgbus.get_service(interface_value)
        request = {'command': query}
        logger.info(f'Running...')
        logger.info(f'Sending request {request}')
        service.request(request)
        logger.info('Waiting for response')
        response, _ = service.recv()
        if len(response['Data']) > 0:
            loaded_json = json.loads(response['Data'])
            index = -1
            # Rows are only enqueued when the response carries all of the
            # image-metadata columns below AND an img_handle column.
            valid_input = [
                'channels', 'defects', 'encoding_level', 'encoding_type',
                'height', 'width'
            ]
            check = all(item in loaded_json['columns'] for item in valid_input)
            if check is True:
                for key in loaded_json['columns']:
                    if key == "img_handle":
                        index = (loaded_json['columns'].index(key))
            if index >= 0:
                for elm in loaded_json['values']:
                    # Zip column names with this row's values into a dict.
                    temp_dict = dict()
                    count = 0
                    for key in loaded_json['columns']:
                        temp_dict[key] = elm[count]
                        count = count + 1

                    # Notify only on the empty->non-empty transition so
                    # consumers are not woken for every row.
                    is_queue_empty = img_handle_queue.empty()
                    img_handle_queue.put(temp_dict)
                    if is_queue_empty:
                        with condition:
                            # NOTE(review): notifyAll() is the deprecated
                            # alias of notify_all().
                            condition.notifyAll()

            output_dir = "/output" + "/" + "data"
            now = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
            filename = str(now) + ".dat"
            common.write_to_file(filename, str.encode(response['Data']),
                                 output_dir)
        # Final wake-up so consumers can observe completion.
        with condition:
            condition.notifyAll()

        service.close()
    except KeyboardInterrupt:
        # NOTE(review): if the interrupt fires before ``service`` is bound,
        # this close() raises NameError — confirm intended.
        logger.info(f' Quitting...')
        service.close()
Ejemplo n.º 17
0
def remove_search_query(query):
    """Delete *query* from the saved-searches file, if it is present."""
    index = find_search_query(query)
    if index < 0:
        return
    lines = read_from_file(SEARCH_FILE).split('\n')
    lines.pop(index)
    # Rebuild the file, dropping empty lines and re-adding newlines.
    rebuilt = ''.join(line + '\n' for line in lines if len(line) > 0)
    write_to_file(SEARCH_FILE, rebuilt)
Ejemplo n.º 18
0
def _run():
    """ Write all _id-s from ip collection to file,
    record in another file the elapsed time.
    To fetch the data find/limit is used (using the _id of the last
    processed document as filter)
    
    !!! THIS APPROACH IS APPLICABLE ONLY IF THE _id IS OF TYPE ObjectId
    """

    conf = common.get_conf('conf')
    if not conf:
        return

    db = common.get_db(conf)
    db_collection = db.ip
    # Initial filter: documents whose isActive.ts.h is after 1991-01-01.
    criteria = {'isActive.ts.h': {'$gt': datetime(1991, 1, 1)}}
    fetch_fields = {'_id': 1, 'isActive.ts.h': 1}
    batch_limit = 1000  # TODO try with 3000
    all_docs_count = db_collection.count(criteria)
    # Flush collected ids to disk whenever the buffer exceeds this size.
    write_limit = 50000
    ids_to_write = []
    sort_field = [('isActive.ts.h', pymongo.ASCENDING)]
    # Seed the cursor with the very first document in sort order.
    docs = list(db_collection.find(criteria).sort(sort_field).limit(1))
    if not docs:
        print('Collection %s is empty' % db_collection)
        return

    last_h = docs[0]['isActive']['ts']['h']
    ids_to_write.append(str(docs[0]['_id']))
    processed_docs_count = 1
    while True:
        # Paginate by advancing the timestamp filter past the last batch,
        # avoiding skip()'s linear cost.
        criteria = {'isActive.ts.h': {'$gt': last_h}}
        docs = list(
            common.retry_run(
                db_collection.find(criteria,
                                   fetch_fields).sort(sort_field).limit,
                batch_limit))
        if not docs:
            break

        last_h = docs[-1]['isActive']['ts']['h']
        ids_to_write.extend([str(doc['_id']) for doc in docs])
        if len(ids_to_write) > write_limit:
            common.write_to_file(FILE_NAME, ids_to_write)
            ids_to_write = []

        processed_docs_count += len(docs)
        percent = (processed_docs_count * 100.) / all_docs_count
        print(' * Processed %d/%d [%6.2f]' %
              (processed_docs_count, all_docs_count, percent))

    # Flush any remainder below the write threshold.
    if ids_to_write:
        common.write_to_file(FILE_NAME, ids_to_write)
Ejemplo n.º 19
0
def save_inv_data(units_dict):
  """Persist fund -> units holdings as a CSV file under data_dir.

  NOTE(review): the filename embeds ``str(type)`` where ``type`` shadows
  the builtin — confirm a module-level ``type`` global is set before this
  runs, otherwise the builtin class repr ends up in the filename.
  """
  inv_data = ['Fund,Units']
  for fund in sorted(units_dict):
    inv_data.append(fund + ',' + str(units_dict[fund]))

  inv_data_file = 'invData' + str(type).capitalize() + str(rank) + '.csv'
  inv_data_file_path = os.path.join(data_dir, inv_data_file)
  common.write_to_file(inv_data_file_path, inv_data)
Ejemplo n.º 20
0
def save_inv_data(units_dict):
  """Persist fund -> units holdings as a CSV file under data_dir.

  The file name encodes the module-level ``rank_type`` and ``rank``.
  """
  inv_data = ['Fund,Units']
  for fund in sorted(units_dict):
    inv_data.append(fund + ',' + str(units_dict[fund]))

  inv_data_file = 'invData' + str(rank_type).capitalize() + str(rank) + '.csv'
  inv_data_file_path = os.path.join(data_dir, inv_data_file)
  common.write_to_file(inv_data_file_path, inv_data)
Ejemplo n.º 21
0
def save():
    """Write the equal-weighted portfolio statistics to output/equalWt.csv."""

    (investment, wealth, abs_return, ann_return, sharpe) = stats_data

    line_data = ','.join(
        str(v) for v in (investment, wealth, abs_return, ann_return, sharpe))
    file_data = [
        'Investment,Wealth,AbsoluteReturn,AnnualizedReturn,Sharpe',
        line_data,
    ]

    out_file_path = os.path.join('output', 'equalWt.csv')
    common.write_to_file(out_file_path, file_data)
Ejemplo n.º 22
0
def add_to_list(list, file):
    """Prepend *list* to *file* unless an equal entry already exists.

    NOTE(review): the parameter names shadow the builtins ``list`` and
    ``file``; kept unchanged because they are part of the public signature.
    """
    if find_list(list, file) >= 0:
        return

    content = read_from_file(file) if os.path.isfile(file) else ""

    # New entry first, then every existing non-empty line.
    kept = (line + '\n' for line in content.split('\n') if len(line) > 0)
    s = '%s\n' % list + ''.join(kept)
    write_to_file(file, s)
Ejemplo n.º 23
0
def add_search_query(query):
    """Prepend *query* to the saved-searches file unless already stored."""
    if find_search_query(query) >= 0:
        return

    content = read_from_file(SEARCH_FILE) if os.path.isfile(SEARCH_FILE) else ""

    # New query first, then every existing non-empty line.
    kept = (line + '\n' for line in content.split('\n') if len(line) > 0)
    s = '%s\n' % query + ''.join(kept)
    write_to_file(SEARCH_FILE, s)
Ejemplo n.º 24
0
def stage_flag_18():
    """Create all box_<n> directories and hide flag 18 in one random box."""
    common.mkdir(common.FLAG_DIRECTORY_18_ROOT)

    chosen_box_number = random.choice(common.FLAG_18_BOX_NUMS)
    chosen_box_directory = None
    for box_num in common.FLAG_18_BOX_NUMS:
        subdir_path = os.path.join(common.FLAG_DIRECTORY_18_ROOT,
                                   "box_%d" % box_num)
        # Remember which directory will hold the flag while still creating
        # every box so they all look alike.
        if box_num == chosen_box_number:
            chosen_box_directory = subdir_path
        common.mkdir(subdir_path)

    flag_path = os.path.join(chosen_box_directory, common.FLAG_FILE_18_NAME)
    common.write_to_file(flag_path, get_formatted_flag_line(18))
Ejemplo n.º 25
0
def save():
  """Write per-fund performance and risk statistics to the output file.

  Reads module-level globals: fund_names, perf_dict, risk_dict,
  output_dir, output_file.
  """

  file_data = ['Fund,Investment,InvPeriod,Wealth,AbsoluteReturn,AnnualizedReturn,Sharpe']

  for fund in sorted(fund_names):
    (investment, wealth, abs_return, ann_return) = perf_dict[fund]
    sharpe = risk_dict[fund]

    # InvPeriod is fixed at 1.0 for this strategy.
    row = [fund, str(investment), '1.0', str(wealth),
           str(abs_return), str(ann_return), str(sharpe)]
    file_data.append(','.join(row))

  out_file = os.path.join(output_dir, output_file)
  common.write_to_file(out_file, file_data)
Ejemplo n.º 26
0
def save():
  """Write per-fund flex-STP statistics to output/flexStp.csv.

  Reads module-level globals: fund_names, stats_dict, num_rows.
  """

  file_data = ['Fund,Investment,InvPeriod,Wealth,AbsoluteReturn,AnnualizedReturn,Sharpe']

  for fund in sorted(fund_names):
    (investment, wealth, abs_return, ann_return, stop_inv, sharpe) = stats_dict[fund]

    # Fraction of the total period during which investment continued.
    # NOTE(review): the 14 looks like a warm-up window — confirm.
    total_period = num_rows - 14
    inv_period = stop_inv * 1.0 / total_period

    row = [fund, str(investment), str(inv_period), str(wealth),
           str(abs_return), str(ann_return), str(sharpe)]
    file_data.append(','.join(row))

  out_file_path = os.path.join('output', 'flexStp.csv')
  common.write_to_file(out_file_path, file_data)
Ejemplo n.º 27
0
def save():
    """Write per-fund flex-STP statistics to output/flexStp.csv.

    Reads module-level globals: fund_names, stats_dict, num_rows.
    """

    file_data = ['Fund,Investment,InvPeriod,Wealth,AbsoluteReturn,AnnualizedReturn,Sharpe']

    for fund in sorted(fund_names):
        (investment, wealth, abs_return, ann_return, stop_inv,
         sharpe) = stats_dict[fund]

        # Fraction of the total period during which investment continued.
        # NOTE(review): the 14 looks like a warm-up window — confirm.
        total_period = num_rows - 14
        inv_period = stop_inv * 1.0 / total_period

        row = [fund, str(investment), str(inv_period), str(wealth),
               str(abs_return), str(ann_return), str(sharpe)]
        file_data.append(','.join(row))

    out_file_path = os.path.join('output', 'flexStp.csv')
    common.write_to_file(out_file_path, file_data)
Ejemplo n.º 28
0
def _run():
    """ Write all _id-s from ip collection to file,
    record in another file the elapsed time.
    To fetch the data skip/limit is used
    """
    
    conf = common.get_conf('conf')
    if not conf:
        return

    db = common.get_db(conf)
    db_collection = db.ip
    # Empty filter: iterate the whole collection.
    criteria = {}
    fetch_fields = {
        '_id': 1
    }
    offset = 0
    batch_limit = 1000  # TODO try with 3000
    all_docs_count = db_collection.count(criteria)
    processed_docs_count = 0
    # Flush collected ids to disk whenever the buffer exceeds this size.
    write_limit = 50000
    ids_to_write = []
    while True:
        # Paginate with skip/limit; skip() cost grows with the offset,
        # which is why the sibling implementation filters by timestamp.
        docs = list(common.retry_run(
            db_collection.find(criteria, fetch_fields).skip(offset).limit,
            batch_limit))
        
        if not docs:
            break

        offset += batch_limit
        ids_to_write.extend([str(doc['_id']) for doc in docs])
        if len(ids_to_write) > write_limit:
            common.write_to_file(FILE_NAME, ids_to_write)
            ids_to_write = []

        processed_docs_count += len(docs)
        percent = (processed_docs_count * 100.) / all_docs_count
        print(' * Processed %d/%d [%6.2f]' %
              (processed_docs_count, all_docs_count, percent))

    # Flush any remainder below the write threshold.
    if ids_to_write:
        common.write_to_file(FILE_NAME, ids_to_write)
Ejemplo n.º 29
0
def download_movie_meta(imdb_id, path):
    """Fetch movie metadata, poster and fanart from TMDB for *imdb_id*
    and cache them under *path*; skips work already on disk.

    Best-effort: individual TMDB fields and image downloads fail softly
    (empty value / marker file) rather than aborting the whole fetch.
    """
    (data_file, poster_file, fanart_file, poster_missing,
     fanart_missing) = _get_meta_paths(imdb_id, path)
    # Only hit the network when some cached artifact is absent.
    if not os.path.isfile(data_file) or not os.path.isfile(
            poster_file) or not os.path.isfile(fanart_file):
        info = TMDBInfo(imdb_id=imdb_id)
        title = info.name()
        try:
            year = info.released().split('-')[0]
        except:
            # Release date missing or unparsable — leave year blank.
            year = ""

        genre = ""
        try:
            # Comma-join categories, then drop the trailing comma.
            for category in info.categories():
                genre += category + ","
            genre = genre[:-1]
        except:
            genre = ""

        tagline = info.tagline()
        overview = info.overview()

        try:
            duration = int(
                info.runtime()
            )  #"%d:%02d" % (int(info.runtime()) / 60, int(info.runtime()) % 60)
        except:
            duration = ""

        rating = info.rating()
        votes = info.votes()
        premiered = info.released()
        mpaa = info.certification()

        # One metadata field per line, in a fixed order the reader expects.
        content = '%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s' % (
            title, year, genre, tagline, overview, duration, rating, votes,
            premiered, mpaa)
        write_to_file(data_file, content)

        poster_url = info.poster()

        if not os.path.isfile(poster_file) or not os.path.isfile(
                poster_missing):
            if USE_POSTERS:
                try:
                    urllib.urlretrieve(poster_url, poster_file)
                except:
                    # Marker file: remember the poster is unavailable so we
                    # do not retry on every call.
                    write_to_file(poster_missing, '')

        fanart_url = info.fanart()

        if not os.path.isfile(fanart_file) or not os.path.isfile(
                fanart_missing):
            if USE_FANART:
                try:
                    urllib.urlretrieve(fanart_url, fanart_file)
                except:
                    # Marker file: remember the fanart is unavailable.
                    write_to_file(fanart_missing, '')
Ejemplo n.º 30
0
def compute_rank():
  """Build the month-by-month sharpe rank CSV (Date,Fund) and write it.

  For every month, funds are sorted by sort_fn (descending) and the fund
  at the module-level ``index`` position is recorded.  Reads module-level
  globals: fund_names, index, rank_type, rank, data_dir, sort_fn.
  """

  global data_dict, sharpe_rank_data
  sharpe_data = common.get_sharpe_data()
  
  sharpe_rank_data = []
  header_line = 'Date,Fund'
  sharpe_rank_data.append(header_line)
  
  for i,r in enumerate(sharpe_data):
  
    # Skip the CSV header row.
    if i == 0: continue
    dt = r.split(',')[0]
    data_line = r.split(',')[1:]
    # data_dict must be a global because sort_fn reads it while sorting.
    data_dict = common.get_fund_nav_dict(fund_names, data_line)
    sorted_funds = sorted(fund_names, key=sort_fn, reverse=True)
    
    line_data = dt + ',' + sorted_funds[index]
    sharpe_rank_data.append(line_data)

  sharpe_rank_file = 'sharpeRank' + str(rank_type).capitalize() + str(rank) + '.csv'
  sharpe_rank_file_path = os.path.join(data_dir, sharpe_rank_file)
  common.write_to_file(sharpe_rank_file_path, sharpe_rank_data)
Ejemplo n.º 31
0
def run(nav_file):
    """Generate monthly sharpe ratios and the derived monthly rank file.

    Uses a rolling window of the last 12 months; each month's rank entry
    is the fund with the highest sharpe ratio, i.e. the fund to invest in.

    :param nav_file: path of the NAV data file to read
    """
    # Ensure the data directory exists before writing anything.
    common.create_dir(data_dir)

    nav_data = common.read_from_file(nav_file)

    # Monthly sharpe ratios over the rolling window.
    sharpe_data = get_sharpe_data(nav_data)
    common.write_to_file(os.path.join(data_dir, sharpe_data_file_name),
                         sharpe_data)

    # Ranking derived from the sharpe ratios.
    sharpe_rank_data = get_sharpe_rank_data(nav_data, sharpe_data)
    common.write_to_file(os.path.join(data_dir, sharpe_rank_file_name),
                         sharpe_rank_data)
Ejemplo n.º 32
0
def compute_rank():
  """Build the month-by-month sharpe rank CSV (Date,Fund) and write it.

  For every month, funds are sorted by sort_fn (descending) and the fund
  at the module-level ``index`` position is recorded.  Reads module-level
  globals: fund_names, index, rank, data_dir, sort_fn.

  NOTE(review): the filename embeds ``str(type)`` where ``type`` shadows
  the builtin — confirm a module-level ``type`` global is set, otherwise
  the builtin class repr ends up in the filename.
  """

  global data_dict, sharpe_rank_data
  sharpe_data = common.get_sharpe_data()
  
  sharpe_rank_data = []
  header_line = 'Date,Fund'
  sharpe_rank_data.append(header_line)
  
  for i,r in enumerate(sharpe_data):
  
    # Skip the CSV header row.
    if i == 0: continue
    dt = r.split(',')[0]
    data_line = r.split(',')[1:]
    # data_dict must be a global because sort_fn reads it while sorting.
    data_dict = common.get_fund_nav_dict(fund_names, data_line)
    sorted_funds = sorted(fund_names, key=sort_fn, reverse=True)
    
    line_data = dt + ',' + sorted_funds[index]
    sharpe_rank_data.append(line_data)

  sharpe_rank_file = 'sharpeRank' + str(type).capitalize() + str(rank) + '.csv'
  sharpe_rank_file_path = os.path.join(data_dir, sharpe_rank_file)
  common.write_to_file(sharpe_rank_file_path, sharpe_rank_data)
Ejemplo n.º 33
0
def run(nav_file):
  """Generate monthly sharpe ratios and the derived monthly rank file.

  Uses a rolling window of the last 12 months; each month's rank entry
  is the fund with the highest sharpe ratio, i.e. the fund to invest in.

  :param nav_file: path of the NAV data file to read
  """
  # Ensure the data directory exists before writing anything.
  common.create_dir(data_dir)

  nav_data = common.read_from_file(nav_file)

  # Monthly sharpe ratios over the rolling window.
  sharpe_data = get_sharpe_data(nav_data)
  common.write_to_file(os.path.join(data_dir, sharpe_data_file_name),
                       sharpe_data)

  # Ranking derived from the sharpe ratios.
  sharpe_rank_data = get_sharpe_rank_data(nav_data, sharpe_data)
  common.write_to_file(os.path.join(data_dir, sharpe_rank_file_name),
                       sharpe_rank_data)
Ejemplo n.º 34
0
def download_movie_meta(imdb_id, path):
    """Fetch movie metadata, poster and fanart from TMDB for *imdb_id*
    and cache them under *path*; skips work already on disk.

    Best-effort: individual TMDB fields and image downloads fail softly
    (empty value / marker file) rather than aborting the whole fetch.
    """
    (data_file, poster_file, fanart_file, poster_missing, fanart_missing) = _get_meta_paths(imdb_id, path)
    # Only hit the network when some cached artifact is absent.
    if not os.path.isfile(data_file) or not os.path.isfile(poster_file) or not os.path.isfile(fanart_file):
            info = TMDBInfo(imdb_id=imdb_id)
            title = info.name()
            try:
                year = info.released().split('-')[0]
            except:
                # Release date missing or unparsable — leave year blank.
                year = ""
            
            genre = ""
            try:
                # Comma-join categories, then drop the trailing comma.
                for category in info.categories():
                    genre += category + ","
                genre = genre[:-1]
            except:
                genre = ""
    
            tagline = info.tagline()
            overview = info.overview()
    
            try:
                duration = int(info.runtime())#"%d:%02d" % (int(info.runtime()) / 60, int(info.runtime()) % 60)
            except:
                duration = ""
            
            rating = info.rating()
            votes = info.votes()
            premiered = info.released()
            mpaa = info.certification()
    
            # One metadata field per line, in a fixed order the reader expects.
            content = '%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s' % (title, year, genre, tagline, overview, duration, rating, votes, premiered, mpaa)
            write_to_file(data_file, content)
            
            poster_url = info.poster()
            
            if not os.path.isfile(poster_file) or not os.path.isfile(poster_missing):
                if USE_POSTERS:
                    try:
                        urllib.urlretrieve(poster_url, poster_file)
                    except:
                        # Marker file: remember the poster is unavailable
                        # so we do not retry on every call.
                        write_to_file(poster_missing, '')

            
            fanart_url = info.fanart()
            
            if not os.path.isfile(fanart_file) or not os.path.isfile(fanart_missing):
                if USE_FANART:
                    try:
                        urllib.urlretrieve(fanart_url, fanart_file)
                    except:
                        # Marker file: remember the fanart is unavailable.
                        write_to_file(fanart_missing, '')
Ejemplo n.º 35
0
def download_tv_show_meta(imdb_id, path):
    """Fetch TV show metadata, poster and fanart from TheTVDB for
    *imdb_id* and cache them under *path*; skips work already on disk.

    Missing artwork is recorded with an empty marker file so later calls
    do not retry the download.
    """
    (data_file, poster_file, fanart_file, poster_missing,
     fanart_missing) = _get_meta_paths(imdb_id, path)
    # Only hit the network when some cached artifact is absent.
    if not os.path.isfile(data_file) or not os.path.isfile(
            poster_file) or not os.path.isfile(fanart_file):
        info = TheTVDBInfo(imdb_id)
        title = info.SeriesName()
        year = info.FirstAired().split('-')[0]
        genre = info.Genre()
        overview = info.Overview()
        rating = info.Rating()
        votes = info.RatingCount()
        premiered = info.FirstAired()
        mpaa = info.ContentRating()

        # One metadata field per line, in a fixed order the reader expects.
        content = '%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s' % (
            title, year, genre, overview, rating, votes, premiered, mpaa)
        write_to_file(data_file, content)

        if not os.path.isfile(poster_file) or not os.path.isfile(
                poster_missing):
            if USE_POSTERS:
                # '_cache' URLs serve the smaller, lower-quality images.
                if META_QUALITY == 'low':
                    image_base_url = 'http://thetvdb.com/banners/_cache/'
                else:
                    image_base_url = 'http://thetvdb.com/banners/'
                poster_href = info.poster()
                if len(poster_href) > 0:
                    poster = '%s%s' % (image_base_url, poster_href)
                    try:
                        urllib.urlretrieve(poster, poster_file)
                    except:
                        # Best-effort download; leave for a later retry.
                        pass
                else:
                    # Marker file: no poster listed for this show.
                    write_to_file(poster_missing, '')

        if not os.path.isfile(fanart_file) or not os.path.isfile(
                fanart_missing):
            if USE_FANART:
                # '_cache' URLs serve the smaller, lower-quality images.
                if META_QUALITY == 'low':
                    image_base_url = 'http://thetvdb.com/banners/_cache/'
                else:
                    image_base_url = 'http://thetvdb.com/banners/'
                fanart_href = info.fanart()
                if len(fanart_href) > 0:
                    fanart = '%s%s' % (image_base_url, fanart_href)
                    try:
                        urllib.urlretrieve(fanart, fanart_file)
                    except:
                        # Best-effort download; leave for a later retry.
                        pass
                else:
                    # Marker file: no fanart listed for this show.
                    write_to_file(fanart_missing, '')
Ejemplo n.º 36
0
def download_tv_show_meta(imdb_id, path):
    """Fetch TV-show metadata from TheTVDB and cache it on disk.

    Writes a newline-separated data file plus (optionally) poster and
    fanart images under *path*.  An empty "missing" marker file is
    written when an image is unavailable so the lookup is not retried.

    :param imdb_id: IMDB identifier of the show
    :param path: base directory for the cached metadata files
    """
    (data_file, poster_file, fanart_file,
     poster_missing, fanart_missing) = _get_meta_paths(imdb_id, path)

    # Everything already cached -> nothing to do.
    if (os.path.isfile(data_file) and os.path.isfile(poster_file)
            and os.path.isfile(fanart_file)):
        return

    show = TheTVDBInfo(imdb_id)
    fields = (
        show.SeriesName(),
        show.FirstAired().split('-')[0],  # year part of the air date
        show.Genre(),
        show.Overview(),
        show.Rating(),
        show.RatingCount(),
        show.FirstAired(),
        show.ContentRating(),
    )
    write_to_file(data_file, '%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s' % fields)

    if not os.path.isfile(poster_file) or not os.path.isfile(poster_missing):
        if USE_POSTERS:
            if META_QUALITY == 'low':
                base_url = 'http://thetvdb.com/banners/_cache/'
            else:
                base_url = 'http://thetvdb.com/banners/'
            poster_href = show.poster()
            if len(poster_href) > 0:
                try:
                    urllib.urlretrieve('%s%s' % (base_url, poster_href),
                                       poster_file)
                except:
                    pass
            else:
                # Marker: poster known to be unavailable.
                write_to_file(poster_missing, '')

    if not os.path.isfile(fanart_file) or not os.path.isfile(fanart_missing):
        if USE_FANART:
            if META_QUALITY == 'low':
                base_url = 'http://thetvdb.com/banners/_cache/'
            else:
                base_url = 'http://thetvdb.com/banners/'
            fanart_href = show.fanart()
            if len(fanart_href) > 0:
                try:
                    urllib.urlretrieve('%s%s' % (base_url, fanart_href),
                                       fanart_file)
                except:
                    pass
            else:
                # Marker: fanart known to be unavailable.
                write_to_file(fanart_missing, '')
Ejemplo n.º 37
0
def download_movie_meta(imdb_id, path):
    """Fetch movie metadata from TMDB and cache it on disk.

    Writes a newline-separated data file plus (optionally) poster and
    fanart images under *path*; empty "missing" marker files record an
    unavailable poster/fanart so the lookup is not retried on every run.

    :param imdb_id: IMDB identifier of the movie
    :param path: base directory for the cached metadata files
    """
    (data_file, poster_file, fanart_file, poster_missing, fanart_missing) = _get_meta_paths(imdb_id, path)
    # Skip the network round-trip entirely when everything is cached.
    if not os.path.isfile(data_file) or not os.path.isfile(poster_file) or not os.path.isfile(fanart_file):
        info = TMDBInfo(imdb_id=imdb_id)
        title = info.name()
        year = info.released().split('-')[0]

        # BUGFIX: the previous manual loop appended "," after every
        # category and then sliced off TWO characters (genre[:-2]),
        # silently truncating the last character of the final genre
        # name. join() produces the comma-separated list correctly.
        genre = ",".join(info.categories())

        tagline = info.tagline()
        overview = info.overview()

        # runtime is in minutes; render as H:MM, blank when unavailable
        try:
            duration = "%d:%02d" % (int(info.runtime()) / 60, int(info.runtime()) % 60)
        except:
            duration = ""

        rating = info.rating()
        votes = info.votes()
        premiered = info.released()
        mpaa = info.certification()

        content = '%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s' % (title, year, genre, tagline, overview, duration, rating, votes, premiered, mpaa)
        write_to_file(data_file, content)

        images = info.images()

        if not os.path.isfile(poster_file) or not os.path.isfile(poster_missing):
            if USE_POSTERS:
                # pick the first poster matching the configured quality
                poster_url = None
                for image in images:
                    if image['type'] == 'poster':
                        if image['size'] == POSTER_QUALITY:
                            poster_url = image['url']
                            break
                if poster_url:
                    try:
                        urllib.urlretrieve(poster_url, poster_file)
                    except:
                        pass
                else:
                    # marker: poster known to be unavailable
                    write_to_file(poster_missing, '')

        if not os.path.isfile(fanart_file) or not os.path.isfile(fanart_missing):
            if USE_FANART:
                # pick the first backdrop matching the configured quality
                fanart_url = None
                for image in images:
                    if image['type'] == 'backdrop':
                        if image['size'] == FANART_QUALITY:
                            fanart_url = image['url']
                            break
                if fanart_url:
                    try:
                        urllib.urlretrieve(fanart_url, fanart_file)
                    except:
                        pass
                else:
                    try:
                        write_to_file(fanart_missing, '')
                    except:
                        pass
Ejemplo n.º 38
0
def run(nav_file, ma_type):
  """Simulate a moving-average driven SIP for every fund and write stats.

  Each month get_mnt_inv() sizes the investment from the previous month's
  investment, the fund's NAV and its moving average; the amount is capped
  so no fund ever absorbs more than default_inv per investable month in
  total.  Per-fund half-yearly and annual XIRR, final wealth and Sharpe
  statistics are written to data_dir/ma_<ma_type>.csv.

  Relies on module globals default_inv and data_dir and on the helpers
  get_ma_data, get_mnt_inv and is_cashflow_missing defined elsewhere in
  this module.

  :param nav_file: CSV of monthly NAVs; header row, date in first column
  :param ma_type: moving-average variant tag (also names the output file)
  """
  nav_data = common.read_from_file(nav_file)
  fund_names = nav_data[0].split(',')[1:]
  # NOTE(review): the first 6/7 data rows appear to only seed the moving
  # average -- confirm against get_ma_data()
  del nav_data[1:7]
  ma_data = get_ma_data(nav_data)
  del nav_data[0:7]

  # per-fund bookkeeping
  cashflows = common.init_array_dict(fund_names)
  fund_inv_dict = common.init_dict(fund_names)
  last_inv_dict = common.init_dict(fund_names, default_inv)
  returns_halfyr = common.init_array_dict(fund_names)
  returns_annual = common.init_array_dict(fund_names)
  units_dict_halfyr = common.init_dict(fund_names)
  units_dict_annual = common.init_dict(fund_names)
  units_dict_overall = common.init_dict(fund_names)

  cnt = len(nav_data)
  # cap: at most the equivalent of one default_inv per investable month
  max_total_inv = default_inv * (cnt - 1)
  for i in xrange(0, cnt):

    row_data = nav_data[i].split(',')
    dt = datetime.strptime(row_data[0], '%d-%m-%Y')
    fund_nav = row_data[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)

    # half-yearly returns for each fund
    if i % 6 == 0 and i > 0:
      wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)
      for fund in fund_names:
        cashflows_halfyr = cashflows[fund][i-6:i] # slice last 6 months cashflows
        # skip funds that missed an investment inside the window
        if is_cashflow_missing(cashflows_halfyr):
          continue

        cf = (dt, wealth[fund])
        cashflows_halfyr.append(cf)
        ret = common.xirr(cashflows_halfyr)
        returns_halfyr[fund].append(ret)

      # clean up for next pass
      units_dict_halfyr = common.init_dict(fund_names)

    # annual returns for each fund
    if i % 12 == 0 and i > 0:
      wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)
      for fund in fund_names:
        cashflows_annual = cashflows[fund][i-12:i] # slice last 12 months cashflows
        if is_cashflow_missing(cashflows_annual):
          continue

        cf = (dt, wealth[fund])
        cashflows_annual.append(cf)
        ret = common.xirr(cashflows_annual)
        returns_annual[fund].append(ret)

      # clean up for next pass
      units_dict_annual = common.init_dict(fund_names)

    # no investment on the last date
    if i == cnt - 1:
      break

    for f in fund_names:

      # cap total investment
      allowed_inv = max_total_inv - fund_inv_dict[f]

      prev_inv = last_inv_dict[f]
      nav = fund_nav_dict[f]
      ma = ma_data[f][i]

      # the MA rule decides this month's contribution
      mnt_inv = get_mnt_inv(ma_type, prev_inv, nav, ma)
      mnt_inv = min(mnt_inv, allowed_inv)
      units = mnt_inv / nav
      units_dict_overall[f] += units
      units_dict_halfyr[f] += units
      units_dict_annual[f] += units

      last_inv_dict[f] = mnt_inv
      fund_inv_dict[f] += mnt_inv
      # investment is a negative cashflow for XIRR purposes
      cf = (dt, -mnt_inv)
      cashflows[f].append(cf)

  file_data = []

  header_line = \
    'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \
    'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \
    'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'
  file_data.append(header_line)

  # final wealth
  nav_line = nav_data[cnt - 1].split(',')[1:]
  fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
  wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)

  # performance stats for each fund
  last_date = nav_data[cnt - 1].split(',')[0]
  dt = datetime.strptime(last_date, '%d-%m-%Y')
  for fund in sorted(fund_names):
    # copy so the closing wealth inflow does not pollute `cashflows`
    fund_cashflows = cashflows[fund][:]
    cf = (dt, wealth[fund])
    fund_cashflows.append(cf)
    fund_inv = fund_inv_dict[fund]
    abs_return = ((wealth[fund] / fund_inv) - 1)
    ann_return = common.xirr(fund_cashflows)

    hfr = returns_halfyr[fund]
    halfyr_rf_rate = common.get_rf_rate('half-yearly')
    halfyr_return_mean = numpy.mean(hfr)
    halfyr_return_std = numpy.std(hfr)
    halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)

    afr = returns_annual[fund]
    annual_rf_rate = common.get_rf_rate('annual')
    annual_return_mean = numpy.mean(afr)
    annual_return_std = numpy.std(afr)
    annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)

    line_data = \
      fund + ',' + str(fund_inv) + ',' + str(wealth[fund]) + ',' + \
      str(abs_return) + ',' + str(ann_return) + ',' + \
      str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \
      str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \
      str(annual_return_std) + ',' + str(annual_sharpe)
    file_data.append(line_data)

  ma_file_name = 'ma_' + ma_type + '.csv'
  ma_file = os.path.join(data_dir, ma_file_name)
  common.write_to_file(ma_file, file_data)
Ejemplo n.º 39
0
def run(nav_file, rank_file):
    """
    Generates return statistics based on sharpe ratio ranking data.

    Each month the single top-ranked fund (one row per month in
    *rank_file*: date,fund,nav) receives the fixed monthly investment
    ``mnt_inv`` (module global).  Half-yearly and annual XIRR returns,
    overall wealth and Sharpe statistics are written to
    ``data_dir/benchmark_file_name``.

    :param nav_file: CSV of monthly NAVs; header row, date first column
    :param rank_file: CSV of monthly top-ranked fund picks
    """

    # create data directory
    common.create_dir(data_dir)

    # read data files
    nav_data = common.read_from_file(nav_file)
    rank_data = common.read_from_file(rank_file)

    # remove redundant entries in nav_data
    target_date = rank_data[1].split(',')[0]
    common.trim_data(nav_data, target_date)
    assert len(nav_data) == len(rank_data)

    # retrieve fund names
    # the first column (date) is skipped
    fund_names = nav_data[0].split(',')[1:]

    # initialize
    cashflows_halfyr = []
    cashflows_annual = []
    cashflows_overall = []
    returns_halfyr = []
    returns_annual = []
    units_dict_halfyr = common.init_dict(fund_names)
    units_dict_annual = common.init_dict(fund_names)
    units_dict_overall = common.init_dict(fund_names)

    # remove header line
    del nav_data[0]
    del rank_data[0]

    # compute cashflows and returns
    cnt = len(nav_data)
    for i in range(0, cnt):

        (date, fund, nav) = rank_data[i].split(',')
        dt = datetime.strptime(date, '%d-%m-%Y')

        # half-yearly returns
        if i % 6 == 0 and i > 0:
            nav_line = nav_data[i].split(',')[1:]
            fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
            wealth = get_wealth(fund_nav_dict, units_dict_halfyr)
            cf = (dt, wealth)
            cashflows_halfyr.append(cf)
            ret = common.xirr(cashflows_halfyr)
            returns_halfyr.append(ret)

            # clean up for next pass
            del cashflows_halfyr[:]
            # BUGFIX: was `units_dict_halfyr[f] = common.init_dict(...)`,
            # which raised NameError (`f` is undefined here) and would
            # have nested a dict under a key instead of resetting the
            # accumulator for the next window.
            units_dict_halfyr = common.init_dict(fund_names)

        # annual returns
        if i % 12 == 0 and i > 0:
            nav_line = nav_data[i].split(',')[1:]
            nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
            wealth = get_wealth(nav_dict, units_dict_annual)
            cf = (dt, wealth)
            cashflows_annual.append(cf)
            ret = common.xirr(cashflows_annual)
            returns_annual.append(ret)

            # clean up for next pass
            del cashflows_annual[:]
            # BUGFIX: same undefined-`f` reset bug as the half-yearly case.
            units_dict_annual = common.init_dict(fund_names)

        # no investment on the last date
        if i == cnt - 1:
            break

        # units invested
        units = mnt_inv / float(nav)
        units_dict_halfyr[fund] += units
        units_dict_annual[fund] += units
        units_dict_overall[fund] += units

        # cash outflow (negative for XIRR)
        cf = (dt, -mnt_inv)
        cashflows_halfyr.append(cf)
        cashflows_annual.append(cf)
        cashflows_overall.append(cf)

    file_data = []

    # investment details
    file_data.append('Investment Details')
    file_data.append('Fund,Units')
    for f in units_dict_overall:
        if units_dict_overall[f] > 0:
            line_data = f + ',' + str(units_dict_overall[f])
            file_data.append(line_data)
    file_data.append('\n')

    # total investment
    num_inv = len(cashflows_overall)
    total_inv = num_inv * mnt_inv
    file_data.append('Investment,' + str(total_inv))

    # final wealth
    nav_line = nav_data[cnt - 1].split(',')[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
    wealth = get_wealth(fund_nav_dict, units_dict_overall)
    file_data.append('Wealth,' + str(wealth))

    # absolute return
    abs_return = ((wealth / total_inv) - 1)
    file_data.append('Absolute Return,' + str(abs_return))

    # annualized return
    last_date = nav_data[cnt - 1].split(',')[0]
    dt = datetime.strptime(last_date, '%d-%m-%Y')
    cf = (dt, wealth)
    cashflows_overall.append(cf)
    annual_return = common.xirr(cashflows_overall)
    file_data.append('Annualized Return,' + str(annual_return))

    file_data.append('\n')
    file_data.append('Stats,Mean,Std Deviation, Sharpe Ratio')

    # half-yearly return stats
    halfyr_rf_rate = common.get_rf_rate('half-yearly')
    halfyr_mean = numpy.mean(returns_halfyr)
    halfyr_stdev = numpy.std(returns_halfyr)
    halfyr_sharpe = common.get_sharpe_ratio(returns_halfyr, halfyr_rf_rate)
    file_data.append('Half-Yearly,' + str(halfyr_mean) + ',' +
                     str(halfyr_stdev) + ',' + str(halfyr_sharpe))

    # annual return stats
    annual_rf_rate = common.get_rf_rate('annual')
    annual_mean = numpy.mean(returns_annual)
    annual_stdev = numpy.std(returns_annual)
    annual_sharpe = common.get_sharpe_ratio(returns_annual, annual_rf_rate)
    file_data.append('Annual,' + str(annual_mean) + ',' + str(annual_stdev) +
                     ',' + str(annual_sharpe))

    # save stats to file
    benchmark_file = os.path.join(data_dir, benchmark_file_name)
    common.write_to_file(benchmark_file, file_data)
Ejemplo n.º 40
0
def subscribe(name, mode):
    """Record a (name, mode) subscription unless it already exists."""
    already_present = subscription_index(name, mode) >= 0
    if already_present:
        return
    entry = '%s\t%s\n' % (str(name), str(mode))
    write_to_file(SUBSCRIPTION_FILE, entry, append=True)
Ejemplo n.º 41
0
def run(nav_file):
  """Simulate a plain monthly SIP (fixed amount ``mnt_inv``) in every fund.

  Invests the module-global amount ``mnt_inv`` in each fund every month,
  tracks half-yearly and annual XIRR returns per fund, and writes
  investment/wealth/return/Sharpe statistics to
  ``data_dir/regular_sip_file_name``.

  :param nav_file: CSV of monthly NAVs; header row, date in first column
  """

  # create data directory
  common.create_dir(data_dir)

  # read nav data
  nav_data = common.read_from_file(nav_file)

  # remove first 12 entries in nav_data
  # to compare results with benchmark
  del nav_data[1:13]

  # retrieve fund names
  # the first column (date) is skipped
  fund_names = nav_data[0].split(',')[1:]

  # initialize
  cashflows = []
  returns_halfyr = common.init_array_dict(fund_names)
  returns_annual = common.init_array_dict(fund_names)
  units_dict_halfyr = common.init_dict(fund_names)
  units_dict_annual = common.init_dict(fund_names)
  units_dict_overall = common.init_dict(fund_names)

  # remove header line
  del nav_data[0]

  # compute cashflows and returns
  cnt = len(nav_data)
  for i in range(0, cnt):

    row_data = nav_data[i].split(',')
    dt = datetime.strptime(row_data[0], '%d-%m-%Y')
    fund_nav = row_data[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)

    # half-yearly returns for each fund
    if i % 6 == 0 and i > 0:
      wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)
      for fund in fund_names:
        # slicing copies, so appending the wealth inflow below
        # does not modify the shared `cashflows` list
        cashflows_halfyr = cashflows[i-6:i] # slice last 6 months cashflows
        cf = (dt, wealth[fund])
        cashflows_halfyr.append(cf)
        ret = common.xirr(cashflows_halfyr)
        returns_halfyr[fund].append(ret)

      # clean up for next pass
      units_dict_halfyr = common.init_dict(fund_names)

    # annual returns for each fund
    if i % 12 == 0 and i > 0:
      wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)
      for fund in fund_names:
        cashflows_annual = cashflows[i-12:i] # slice last 12 months cashflows
        cf = (dt, wealth[fund])
        cashflows_annual.append(cf)
        ret = common.xirr(cashflows_annual)
        returns_annual[fund].append(ret)

      # clean up for next pass
      units_dict_annual = common.init_dict(fund_names)

    # no investment on the last date
    if i == cnt - 1:
      break

    # invested units
    for fund in fund_names:
      nav = fund_nav_dict[fund]
      units = mnt_inv / nav
      units_dict_halfyr[fund] += units
      units_dict_annual[fund] += units
      units_dict_overall[fund] += units

    # cash outflow (negative for XIRR)
    cf = (dt, -mnt_inv)
    cashflows.append(cf)

  file_data = []

  header_line = \
    'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \
    'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \
    'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'
  file_data.append(header_line)

  # total investment (same for every fund: mnt_inv per month)
  num_inv = len(cashflows)
  total_inv = num_inv * mnt_inv

  # final wealth
  nav_line = nav_data[cnt - 1].split(',')[1:]
  fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
  wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)

  # performance stats for each fund
  last_date = nav_data[cnt - 1].split(',')[0]
  dt = datetime.strptime(last_date, '%d-%m-%Y')
  for fund in sorted(fund_names):
    # copy so the closing wealth inflow does not pollute `cashflows`
    fund_cashflows = cashflows[:]
    cf = (dt, wealth[fund])
    fund_cashflows.append(cf)
    abs_return = ((wealth[fund] / total_inv) - 1)
    ann_return = common.xirr(fund_cashflows)

    hfr = returns_halfyr[fund]
    halfyr_rf_rate = common.get_rf_rate('half-yearly')
    halfyr_return_mean = numpy.mean(hfr)
    halfyr_return_std = numpy.std(hfr)
    halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)

    afr = returns_annual[fund]
    annual_rf_rate = common.get_rf_rate('annual')
    annual_return_mean = numpy.mean(afr)
    annual_return_std = numpy.std(afr)
    annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)

    line_data = \
      fund + ',' + str(total_inv) + ',' + str(wealth[fund]) + ',' + \
      str(abs_return) + ',' + str(ann_return) + ',' + \
      str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \
      str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \
      str(annual_return_std) + ',' + str(annual_sharpe)
    file_data.append(line_data)

  regular_sip_file = os.path.join(data_dir, regular_sip_file_name)
  common.write_to_file(regular_sip_file, file_data)
Ejemplo n.º 42
0
    else:
        print "Successfully logged in!"

        #TASK 1 - Change interface descriptions
        if set_intf_desc(switch, token,
                         get_interfaces(switch, token, 'l1PhysIf')):
            print "Successfully updated interface descritions"

        #TASK 2 - Configure a Loopback and BGP
        #first create the loopback
        set_loopback(switch, token)
        #Ensure the BGP configuration has succeeded, otherwise dont try to get the BGP config
        if set_bgp_config(switch, token):
            print "Successfully modified BGP configuration"
            get_bgp_config(switch, token)
        else:
            print "Failed to modify BGP configuration"

        #TASK 3 - Write device information to a file
        fname = 'nxsov.txt'
        write_to_file(
            fname,
            get_boot_img(switch, token) + get_device_info(switch, token) +
            get_bgp_config(switch, token))
        print 'Wrote device information to ' + fname + '\n'

        if aaa_logout(user, switch, token) == requests.codes.ok:
            print "Successfully logged out!"
        else:
            print "Logout Unsuccessful"
Ejemplo n.º 43
0
        if prereq:
            course = ' '.join([dept, course_no])
            prereq_map[course] = prereq
    return prereq_map


def get_all_prereqs(course_map, browser):
    """Collect the prerequisite map of every department in *course_map*.

    Returns a list with one prereq dict per department, in iteration
    order of *course_map*.
    """
    collected = []
    for dept in course_map:
        print(f'Dept: {dept}')
        dept_prereqs = get_prereqs_for_dept(dept, course_map[dept], browser)
        collected.append(dept_prereqs)
    return collected


if __name__ == "__main__":
    # Build (or load) the department -> course-numbers map, then scrape
    # prerequisites; each stage is cached to disk so reruns skip the
    # expensive scraping step.
    http = urllib3.PoolManager()
    course_codes = get_course_codes(http)
    if not os.path.exists(COURSE_MAP_FILE_PATH):
        print('Course map does not exist')
        course_map = get_course_numbers(course_codes, http)
        write_to_file(course_map, COURSE_MAP_FILE_PATH)
    else:
        print('Course map exists')
        course_map = read_json_from_file(COURSE_MAP_FILE_PATH)
    # Prereq scrape only runs when its output file is absent.
    if not os.path.exists(PREREQ_FILE_PATH):
        prereqs = get_all_prereqs(course_map, http)
        write_to_file(prereqs, PREREQ_FILE_PATH)
    else:
        print('Prereq file already exists')
Ejemplo n.º 44
0
def run(nav_file, rank_file):
  """
  Generates return statistics based on sharpe ratio ranking data.

  Each month the single top-ranked fund (one row per month in
  *rank_file*: date,fund,nav) receives the fixed monthly investment
  ``mnt_inv`` (module global).  Half-yearly and annual XIRR returns,
  overall wealth and Sharpe statistics are written to
  ``data_dir/benchmark_file_name``.

  :param nav_file: CSV of monthly NAVs; header row, date first column
  :param rank_file: CSV of monthly top-ranked fund picks
  """

  # create data directory
  common.create_dir(data_dir)

  # read data files
  nav_data = common.read_from_file(nav_file)
  rank_data = common.read_from_file(rank_file)

  # remove redundant entries in nav_data
  target_date = rank_data[1].split(',')[0]
  common.trim_data(nav_data, target_date)
  assert len(nav_data) == len(rank_data)

  # retrieve fund names
  # the first column (date) is skipped
  fund_names = nav_data[0].split(',')[1:]

  # initialize
  cashflows_halfyr = []
  cashflows_annual = []
  cashflows_overall = []
  returns_halfyr = []
  returns_annual = []
  units_dict_halfyr = common.init_dict(fund_names)
  units_dict_annual = common.init_dict(fund_names)
  units_dict_overall = common.init_dict(fund_names)

  # remove header line
  del nav_data[0]
  del rank_data[0]

  # compute cashflows and returns
  cnt = len(nav_data)
  for i in range(0, cnt):

    (date, fund, nav) = rank_data[i].split(',')
    dt = datetime.strptime(date, '%d-%m-%Y')

    # half-yearly returns
    if i % 6 == 0 and i > 0:
      nav_line = nav_data[i].split(',')[1:]
      fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
      wealth = get_wealth(fund_nav_dict, units_dict_halfyr)
      cf = (dt, wealth)
      cashflows_halfyr.append(cf)
      ret = common.xirr(cashflows_halfyr)
      returns_halfyr.append(ret)

      # clean up for next pass
      del cashflows_halfyr[:]
      # BUGFIX: was `units_dict_halfyr[f] = common.init_dict(...)`,
      # which raised NameError (`f` is undefined here) and would have
      # nested a dict under a key instead of resetting the accumulator.
      units_dict_halfyr = common.init_dict(fund_names)

    # annual returns
    if i % 12 == 0 and i > 0:
      nav_line = nav_data[i].split(',')[1:]
      nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
      wealth = get_wealth(nav_dict, units_dict_annual)
      cf = (dt, wealth)
      cashflows_annual.append(cf)
      ret = common.xirr(cashflows_annual)
      returns_annual.append(ret)

      # clean up for next pass
      del cashflows_annual[:]
      # BUGFIX: same undefined-`f` reset bug as the half-yearly case.
      units_dict_annual = common.init_dict(fund_names)

    # no investment on the last date
    if i == cnt - 1:
      break

    # units invested
    units = mnt_inv / float(nav)
    units_dict_halfyr[fund] += units
    units_dict_annual[fund] += units
    units_dict_overall[fund] += units

    # cash outflow (negative for XIRR)
    cf = (dt, -mnt_inv)
    cashflows_halfyr.append(cf)
    cashflows_annual.append(cf)
    cashflows_overall.append(cf)

  file_data = []

  # investment details
  file_data.append('Investment Details')
  file_data.append('Fund,Units')
  for f in units_dict_overall:
    if units_dict_overall[f] > 0:
      line_data = f + ','  + str(units_dict_overall[f])
      file_data.append(line_data)
  file_data.append('\n')

  # total investment
  num_inv = len(cashflows_overall)
  total_inv = num_inv * mnt_inv
  file_data.append('Investment,' + str(total_inv))

  # final wealth
  nav_line = nav_data[cnt - 1].split(',')[1:]
  fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
  wealth = get_wealth(fund_nav_dict, units_dict_overall)
  file_data.append('Wealth,' + str(wealth))

  # absolute return
  abs_return = ((wealth / total_inv) - 1)
  file_data.append('Absolute Return,' + str(abs_return))

  # annualized return
  last_date = nav_data[cnt - 1].split(',')[0]
  dt = datetime.strptime(last_date, '%d-%m-%Y')
  cf = (dt, wealth)
  cashflows_overall.append(cf)
  annual_return = common.xirr(cashflows_overall)
  file_data.append('Annualized Return,' + str(annual_return))

  file_data.append('\n')
  file_data.append('Stats,Mean,Std Deviation, Sharpe Ratio')

  # half-yearly return stats
  halfyr_rf_rate = common.get_rf_rate('half-yearly')
  halfyr_mean = numpy.mean(returns_halfyr)
  halfyr_stdev = numpy.std(returns_halfyr)
  halfyr_sharpe = common.get_sharpe_ratio(returns_halfyr, halfyr_rf_rate)
  file_data.append('Half-Yearly,' + str(halfyr_mean) + ',' + str(halfyr_stdev) + ',' + str(halfyr_sharpe))

  # annual return stats
  annual_rf_rate = common.get_rf_rate('annual')
  annual_mean = numpy.mean(returns_annual)
  annual_stdev = numpy.std(returns_annual)
  annual_sharpe = common.get_sharpe_ratio(returns_annual, annual_rf_rate)
  file_data.append('Annual,' + str(annual_mean) + ',' + str(annual_stdev) + ',' + str(annual_sharpe))

  # save stats to file
  benchmark_file = os.path.join(data_dir, benchmark_file_name)
  common.write_to_file(benchmark_file, file_data)
Ejemplo n.º 45
0
    return compressed_matrix


# Build a word/article sparse matrix from a fixed-size article sample,
# caching each intermediate stage to disk.  Timestamps bracket each stage
# for coarse profiling.
print(datetime.datetime.now())
print("Reading articles")
# articles = read_articles()[1:]
# Work on a fixed-size sample; the first row is skipped -- presumably a
# header, TODO confirm read_articles() output format.
size = 10000
articles = read_articles()[1:size + 1]
name = 'matrix_' + str(size)
print("There are {0} articles".format(size))

print(datetime.datetime.now())
print("Preparing")
articles = prepare_articles(articles)
# Cache the preprocessed articles: one comma-joined article per line.
articles_str = [",".join(a) for a in articles]
write_to_file('p_articles_{0}.txt'.format(size), "\n".join(articles_str))
# print("Cached")
# articles = [a.split(',') for a in read_file('p_articles_{0}.txt'.format(size)).split('\n')]

print(datetime.datetime.now())
print("Processing words to vector")
words_vector = extract_and_process_words(articles)
# Cache the vocabulary as a single comma-separated line.
write_to_file('words_{0}.txt'.format(size), ",".join(words_vector))
# print("Cached")
# words_vector = read_file('words_{0}.txt'.format(size)).split(',')

print(datetime.datetime.now())
print("Creating sparse matrix")
word_matrix = as_sprase_matrix(words_vector, articles)
sparse.save_npz('{0}.npz'.format(name), word_matrix)
# print("Cached")
Ejemplo n.º 46
0
            return temp
    return result


if __name__ == "__main__":
    # Load the scraped per-department prereq maps (a list of dicts) and
    # merge them into one course -> prereq-string dict.
    prereqs = read_json_from_file(PREREQ_FILE_PATH)
    merged = {}
    for d in prereqs:
        merged.update(d)
    prereqs = merged
    remove_prereq_string(prereqs)

    # Drop courses whose prereq text contains no recognizable course code.
    courses_to_remove = []
    for course, value in prereqs.items():
        classes = re.findall(COURSE_REGEX, value)
        if len(classes) == 0:
            courses_to_remove.append(course)
    [prereqs.pop(course) for course in courses_to_remove]

    # Parse each remaining prereq string into structured form.
    converted_prereq = {}
    for course, prereq in prereqs.items():
        converted_prereq[course] = get_prereq_from_string(prereq)

    # Debug dump: raw vs parsed prerequisites for eyeballing.
    for course in converted_prereq:
        print(course)
        print(prereqs[course])
        print(converted_prereq[course])
        print('')

    write_to_file(converted_prereq, PARSED_PREREQ_PATH)
Ejemplo n.º 47
0
def run(nav_file):
    """Simulate a plain monthly SIP (fixed amount ``mnt_inv``) in every fund.

    Invests the module-global amount ``mnt_inv`` in each fund every
    month, tracks half-yearly and annual XIRR returns per fund, and
    writes investment/wealth/return/Sharpe statistics to
    ``data_dir/regular_sip_file_name``.

    :param nav_file: CSV of monthly NAVs; header row, date first column
    """

    # create data directory
    common.create_dir(data_dir)

    # read nav data
    nav_data = common.read_from_file(nav_file)

    # remove first 12 entries in nav_data
    # to compare results with benchmark
    del nav_data[1:13]

    # retrieve fund names
    # the first column (date) is skipped
    fund_names = nav_data[0].split(',')[1:]

    # initialize
    cashflows = []
    returns_halfyr = common.init_array_dict(fund_names)
    returns_annual = common.init_array_dict(fund_names)
    units_dict_halfyr = common.init_dict(fund_names)
    units_dict_annual = common.init_dict(fund_names)
    units_dict_overall = common.init_dict(fund_names)

    # remove header line
    del nav_data[0]

    # compute cashflows and returns
    cnt = len(nav_data)
    for i in range(0, cnt):

        row_data = nav_data[i].split(',')
        dt = datetime.strptime(row_data[0], '%d-%m-%Y')
        fund_nav = row_data[1:]
        fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)

        # half-yearly returns for each fund
        if i % 6 == 0 and i > 0:
            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)
            for fund in fund_names:
                # slicing copies, so appending the wealth inflow below
                # does not modify the shared `cashflows` list
                cashflows_halfyr = cashflows[
                    i - 6:i]  # slice last 6 months cashflows
                cf = (dt, wealth[fund])
                cashflows_halfyr.append(cf)
                ret = common.xirr(cashflows_halfyr)
                returns_halfyr[fund].append(ret)

            # clean up for next pass
            units_dict_halfyr = common.init_dict(fund_names)

        # annual returns for each fund
        if i % 12 == 0 and i > 0:
            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)
            for fund in fund_names:
                cashflows_annual = cashflows[
                    i - 12:i]  # slice last 12 months cashflows
                cf = (dt, wealth[fund])
                cashflows_annual.append(cf)
                ret = common.xirr(cashflows_annual)
                returns_annual[fund].append(ret)

            # clean up for next pass
            units_dict_annual = common.init_dict(fund_names)

        # no investment on the last date
        if i == cnt - 1:
            break

        # invested units
        for fund in fund_names:
            nav = fund_nav_dict[fund]
            units = mnt_inv / nav
            units_dict_halfyr[fund] += units
            units_dict_annual[fund] += units
            units_dict_overall[fund] += units

        # cash outflow (negative for XIRR)
        cf = (dt, -mnt_inv)
        cashflows.append(cf)

    file_data = []

    header_line = \
      'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \
      'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \
      'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'
    file_data.append(header_line)

    # total investment (same for every fund: mnt_inv per month)
    num_inv = len(cashflows)
    total_inv = num_inv * mnt_inv

    # final wealth
    nav_line = nav_data[cnt - 1].split(',')[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
    wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)

    # performance stats for each fund
    last_date = nav_data[cnt - 1].split(',')[0]
    dt = datetime.strptime(last_date, '%d-%m-%Y')
    for fund in sorted(fund_names):
        # copy so the closing wealth inflow does not pollute `cashflows`
        fund_cashflows = cashflows[:]
        cf = (dt, wealth[fund])
        fund_cashflows.append(cf)
        abs_return = ((wealth[fund] / total_inv) - 1)
        ann_return = common.xirr(fund_cashflows)

        hfr = returns_halfyr[fund]
        halfyr_rf_rate = common.get_rf_rate('half-yearly')
        halfyr_return_mean = numpy.mean(hfr)
        halfyr_return_std = numpy.std(hfr)
        halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)

        afr = returns_annual[fund]
        annual_rf_rate = common.get_rf_rate('annual')
        annual_return_mean = numpy.mean(afr)
        annual_return_std = numpy.std(afr)
        annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)

        line_data = \
          fund + ',' + str(total_inv) + ',' + str(wealth[fund]) + ',' + \
          str(abs_return) + ',' + str(ann_return) + ',' + \
          str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \
          str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \
          str(annual_return_std) + ',' + str(annual_sharpe)
        file_data.append(line_data)

    regular_sip_file = os.path.join(data_dir, regular_sip_file_name)
    common.write_to_file(regular_sip_file, file_data)
Ejemplo n.º 48
0
def run(nav_file, ma_type):
  """Simulate a moving-average SIP with a debt-corpus buffer and report stats.

  Each month every fund receives an equity investment sized by
  get_mnt_inv() (NAV vs. its moving average); the gap between that and
  the default monthly amount flows into (or is redeemed from) an
  interest-earning debt corpus.  Per-fund absolute/annualized returns and
  half-yearly/annual Sharpe stats are written to
  'ma_with_debt_<ma_type>.csv' under data_dir.

  :param nav_file: path of the CSV file with monthly NAV data
      (first row is the header: date column followed by fund names)
  :param ma_type: moving-average variant id (passed to get_mnt_inv and
      used in the output file name)
  """
  nav_data = common.read_from_file(nav_file)
  fund_names = nav_data[0].split(',')[1:]
  # NOTE(review): these deletions appear to align the NAV rows with the
  # moving-average seeding window — confirm against get_ma_data
  del nav_data[1:7]
  ma_data = get_ma_data(nav_data)
  del nav_data[0:7]

  cashflows = common.init_array_dict(fund_names)
  fund_inv_dict = common.init_dict(fund_names)
  fund_corpus_dict = common.init_dict(fund_names)
  fund_corpus_index_dict = common.init_array_dict(fund_names)
  last_inv_dict = common.init_dict(fund_names, default_inv)
  returns_halfyr = common.init_array_dict(fund_names)
  returns_annual = common.init_array_dict(fund_names)
  units_dict_halfyr = common.init_dict(fund_names)
  units_dict_annual = common.init_dict(fund_names)
  units_dict_overall = common.init_dict(fund_names)

  cnt = len(nav_data)
  max_total_inv = default_inv * (cnt - 1)
  for i in xrange(0, cnt):

    row_data = nav_data[i].split(',')
    dt = datetime.strptime(row_data[0], '%d-%m-%Y')
    fund_nav = row_data[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)

    # half-yearly returns for each fund
    if i % 6 == 0 and i > 0:

      wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)
      for fund in fund_names:
        # Corpus growth over the window.  BUG FIX: for the first window
        # (i == 6) the old index i-7 == -1 silently wrapped to the
        # window's *end* corpus, reporting zero growth; the corpus
        # before any investment is 0.
        start_corpus = fund_corpus_index_dict[fund][i-7] if i > 6 else 0
        end_corpus = fund_corpus_index_dict[fund][i-1]
        corpus_wealth = end_corpus - start_corpus
        total_wealth = wealth[fund] + corpus_wealth

        cashflows_halfyr = cashflows[fund][i-6:i] # slice last 6 months cashflows
        if is_cashflow_missing(cashflows_halfyr):
          continue

        cf = (dt, total_wealth)
        cashflows_halfyr.append(cf)
        ret = common.xirr(cashflows_halfyr)
        returns_halfyr[fund].append(ret)

      # clean up
      units_dict_halfyr = common.init_dict(fund_names)

    # annual returns for each fund
    if i % 12 == 0 and i > 0:

      wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)
      for fund in fund_names:
        # BUG FIX: same negative-index wraparound as the half-yearly
        # branch, for the first annual window (i == 12)
        start_corpus = fund_corpus_index_dict[fund][i-13] if i > 12 else 0
        end_corpus = fund_corpus_index_dict[fund][i-1]
        corpus_wealth = end_corpus - start_corpus
        total_wealth = wealth[fund] + corpus_wealth

        cashflows_annual = cashflows[fund][i-12:i] # slice last 12 months cashflows
        if is_cashflow_missing(cashflows_annual):
          continue

        # BUG FIX: use the year's total wealth (equity + corpus growth),
        # matching the half-yearly branch; the old code added the entire
        # corpus balance and left the computed total_wealth unused
        cf = (dt, total_wealth)
        cashflows_annual.append(cf)
        ret = common.xirr(cashflows_annual)
        returns_annual[fund].append(ret)

      # clean up
      units_dict_annual = common.init_dict(fund_names)

    # no investment on the last date
    if i == cnt - 1:
      break

    for f in fund_names:

      # cap total investment
      allowed_inv = max_total_inv - fund_inv_dict[f]

      prev_inv = last_inv_dict[f]
      nav = fund_nav_dict[f]
      ma = ma_data[f][i]

      # equity investment
      mnt_inv = get_mnt_inv(ma_type, prev_inv, nav, ma)
      mnt_inv = min(mnt_inv, allowed_inv)
      last_inv_dict[f] = mnt_inv
      allowed_inv -= mnt_inv

      # debt investment: invest the shortfall vs. the default amount, or
      # redeem from the corpus (negative) when equity exceeded the default
      corpus = fund_corpus_dict[f]
      debt_inv = default_inv - mnt_inv
      if debt_inv < 0:
        debt_inv = -min(mnt_inv - default_inv, corpus)
      else:
        debt_inv = min(debt_inv, allowed_inv)

      # corpus investment + interest
      corpus += debt_inv
      interest = corpus * int_rate
      corpus += interest
      fund_corpus_dict[f] = corpus
      fund_corpus_index_dict[f].append(corpus)

      # total investment
      total_inv = mnt_inv + debt_inv
      fund_inv_dict[f] += total_inv

      # invested units
      units = mnt_inv / nav
      units_dict_overall[f] += units
      units_dict_halfyr[f] += units
      units_dict_annual[f] += units

      # cashflows (investments recorded as outflows, i.e. negative, for XIRR)
      cf = (dt, -total_inv)
      cashflows[f].append(cf)

  file_data = []

  header_line = \
    'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \
    'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \
    'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'
  file_data.append(header_line)

  # final wealth (equity only; corpus is added per fund below)
  nav_line = nav_data[cnt - 1].split(',')[1:]
  fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
  wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)

  # performance stats for each fund
  last_date = nav_data[cnt - 1].split(',')[0]
  dt = datetime.strptime(last_date, '%d-%m-%Y')
  for fund in sorted(fund_names):
    total_wealth = wealth[fund] + fund_corpus_dict[fund]
    fund_cashflows = cashflows[fund][:]
    cf = (dt, total_wealth)
    fund_cashflows.append(cf)

    fund_inv = fund_inv_dict[fund]
    abs_return = ((total_wealth / fund_inv) - 1)
    ann_return = common.xirr(fund_cashflows)

    hfr = returns_halfyr[fund]
    halfyr_rf_rate = common.get_rf_rate('half-yearly')
    halfyr_return_mean = numpy.mean(hfr)
    halfyr_return_std = numpy.std(hfr)
    halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)

    afr = returns_annual[fund]
    annual_rf_rate = common.get_rf_rate('annual')
    annual_return_mean = numpy.mean(afr)
    annual_return_std = numpy.std(afr)
    annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)

    line_data = \
      fund + ',' + str(fund_inv) + ',' + str(total_wealth) + ',' + \
      str(abs_return) + ',' + str(ann_return) + ',' + \
      str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \
      str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \
      str(annual_return_std) + ',' + str(annual_sharpe)
    file_data.append(line_data)

  ma_file_name = 'ma_with_debt_' + ma_type + '.csv'
  ma_file = os.path.join(data_dir, ma_file_name)
  common.write_to_file(ma_file, file_data)
Ejemplo n.º 49
0
def run(nav_file, ma_type):
    """Simulate a moving-average SIP with a debt-corpus buffer and report stats.

    Each month every fund receives an equity investment sized by
    get_mnt_inv() (NAV vs. its moving average); the gap between that and
    the default monthly amount flows into (or is redeemed from) an
    interest-earning debt corpus.  Per-fund absolute/annualized returns
    and half-yearly/annual Sharpe stats are written to
    'ma_with_debt_<ma_type>.csv' under data_dir.

    :param nav_file: path of the CSV file with monthly NAV data
        (first row is the header: date column followed by fund names)
    :param ma_type: moving-average variant id (passed to get_mnt_inv and
        used in the output file name)
    """
    nav_data = common.read_from_file(nav_file)
    fund_names = nav_data[0].split(',')[1:]
    # NOTE(review): these deletions appear to align the NAV rows with the
    # moving-average seeding window — confirm against get_ma_data
    del nav_data[1:7]
    ma_data = get_ma_data(nav_data)
    del nav_data[0:7]

    cashflows = common.init_array_dict(fund_names)
    fund_inv_dict = common.init_dict(fund_names)
    fund_corpus_dict = common.init_dict(fund_names)
    fund_corpus_index_dict = common.init_array_dict(fund_names)
    last_inv_dict = common.init_dict(fund_names, default_inv)
    returns_halfyr = common.init_array_dict(fund_names)
    returns_annual = common.init_array_dict(fund_names)
    units_dict_halfyr = common.init_dict(fund_names)
    units_dict_annual = common.init_dict(fund_names)
    units_dict_overall = common.init_dict(fund_names)

    cnt = len(nav_data)
    max_total_inv = default_inv * (cnt - 1)
    for i in xrange(0, cnt):

        row_data = nav_data[i].split(',')
        dt = datetime.strptime(row_data[0], '%d-%m-%Y')
        fund_nav = row_data[1:]
        fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)

        # half-yearly returns for each fund
        if i % 6 == 0 and i > 0:

            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)
            for fund in fund_names:
                # Corpus growth over the window.  BUG FIX: for the first
                # window (i == 6) the old index i-7 == -1 silently
                # wrapped to the window's *end* corpus, reporting zero
                # growth; the corpus before any investment is 0.
                start_corpus = \
                    fund_corpus_index_dict[fund][i - 7] if i > 6 else 0
                end_corpus = fund_corpus_index_dict[fund][i - 1]
                corpus_wealth = end_corpus - start_corpus
                total_wealth = wealth[fund] + corpus_wealth

                cashflows_halfyr = cashflows[fund][
                    i - 6:i]  # slice last 6 months cashflows
                if is_cashflow_missing(cashflows_halfyr):
                    continue

                cf = (dt, total_wealth)
                cashflows_halfyr.append(cf)
                ret = common.xirr(cashflows_halfyr)
                returns_halfyr[fund].append(ret)

            # clean up
            units_dict_halfyr = common.init_dict(fund_names)

        # annual returns for each fund
        if i % 12 == 0 and i > 0:

            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)
            for fund in fund_names:
                # BUG FIX: same negative-index wraparound as the
                # half-yearly branch, for the first annual window (i == 12)
                start_corpus = \
                    fund_corpus_index_dict[fund][i - 13] if i > 12 else 0
                end_corpus = fund_corpus_index_dict[fund][i - 1]
                corpus_wealth = end_corpus - start_corpus
                total_wealth = wealth[fund] + corpus_wealth

                cashflows_annual = cashflows[fund][
                    i - 12:i]  # slice last 12 months cashflows
                if is_cashflow_missing(cashflows_annual):
                    continue

                # BUG FIX: use the year's total wealth (equity + corpus
                # growth), matching the half-yearly branch; the old code
                # added the entire corpus balance and left the computed
                # total_wealth unused
                cf = (dt, total_wealth)
                cashflows_annual.append(cf)
                ret = common.xirr(cashflows_annual)
                returns_annual[fund].append(ret)

            # clean up
            units_dict_annual = common.init_dict(fund_names)

        # no investment on the last date
        if i == cnt - 1:
            break

        for f in fund_names:

            # cap total investment
            allowed_inv = max_total_inv - fund_inv_dict[f]

            prev_inv = last_inv_dict[f]
            nav = fund_nav_dict[f]
            ma = ma_data[f][i]

            # equity investment
            mnt_inv = get_mnt_inv(ma_type, prev_inv, nav, ma)
            mnt_inv = min(mnt_inv, allowed_inv)
            last_inv_dict[f] = mnt_inv
            allowed_inv -= mnt_inv

            # debt investment: invest the shortfall vs. the default
            # amount, or redeem from the corpus (negative) when equity
            # exceeded the default
            corpus = fund_corpus_dict[f]
            debt_inv = default_inv - mnt_inv
            if debt_inv < 0:
                debt_inv = -min(mnt_inv - default_inv, corpus)
            else:
                debt_inv = min(debt_inv, allowed_inv)

            # corpus investment + interest
            corpus += debt_inv
            interest = corpus * int_rate
            corpus += interest
            fund_corpus_dict[f] = corpus
            fund_corpus_index_dict[f].append(corpus)

            # total investment
            total_inv = mnt_inv + debt_inv
            fund_inv_dict[f] += total_inv

            # invested units
            units = mnt_inv / nav
            units_dict_overall[f] += units
            units_dict_halfyr[f] += units
            units_dict_annual[f] += units

            # cashflows (investments recorded as outflows, i.e. negative,
            # for XIRR)
            cf = (dt, -total_inv)
            cashflows[f].append(cf)

    file_data = []

    header_line = \
      'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \
      'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \
      'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'
    file_data.append(header_line)

    # final wealth (equity only; corpus is added per fund below)
    nav_line = nav_data[cnt - 1].split(',')[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
    wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)

    # performance stats for each fund
    last_date = nav_data[cnt - 1].split(',')[0]
    dt = datetime.strptime(last_date, '%d-%m-%Y')
    for fund in sorted(fund_names):
        total_wealth = wealth[fund] + fund_corpus_dict[fund]
        fund_cashflows = cashflows[fund][:]
        cf = (dt, total_wealth)
        fund_cashflows.append(cf)

        fund_inv = fund_inv_dict[fund]
        abs_return = ((total_wealth / fund_inv) - 1)
        ann_return = common.xirr(fund_cashflows)

        hfr = returns_halfyr[fund]
        halfyr_rf_rate = common.get_rf_rate('half-yearly')
        halfyr_return_mean = numpy.mean(hfr)
        halfyr_return_std = numpy.std(hfr)
        halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)

        afr = returns_annual[fund]
        annual_rf_rate = common.get_rf_rate('annual')
        annual_return_mean = numpy.mean(afr)
        annual_return_std = numpy.std(afr)
        annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)

        line_data = \
          fund + ',' + str(fund_inv) + ',' + str(total_wealth) + ',' + \
          str(abs_return) + ',' + str(ann_return) + ',' + \
          str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \
          str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \
          str(annual_return_std) + ',' + str(annual_sharpe)
        file_data.append(line_data)

    ma_file_name = 'ma_with_debt_' + ma_type + '.csv'
    ma_file = os.path.join(data_dir, ma_file_name)
    common.write_to_file(ma_file, file_data)