def run():
    file_data = []
    header_line = 'Category,Inv Period,Return,Std Dev,Sharpe'
    file_data.append(header_line)
    for header in sorted(out_file_dict, key=sort_val):
        out_file = out_file_dict[header]
        out_file_path = os.path.join('output', out_file)
        out_data = common.read_from_file(out_file_path)
        del out_data[0]
        inv_period_data = []
        ret_data = []
        sharpe_data = []
        for r in out_data:
            row_data = r.split(',')
            inv_period_data.append(float(row_data[2]))
            ret_data.append(float(row_data[5]))
            sharpe_data.append(float(row_data[6]))
        inv_period = numpy.mean(inv_period_data)
        ret = numpy.mean(ret_data)
        stdev = numpy.std(ret_data)
        sharpe = numpy.mean(sharpe_data)
        line_data = header + ',' + str(inv_period) + ',' + str(ret) + ',' \
            + str(stdev) + ',' + str(sharpe)
        file_data.append(line_data)

    rank_file = os.path.join('output', 'ranked.csv')
    rank_data = common.read_from_file(rank_file)
    del rank_data[0]
    for r in rank_data:
        row_data = r.split(',')
        category = row_data[0].capitalize() + ' ' + row_data[1]
        ret = row_data[5]
        sharpe = row_data[6]
        line_data = category + ',1.0,' + ret + ',,' + sharpe
        file_data.append(line_data)

    equalWt_file = os.path.join('output', 'equalWt.csv')
    equalWt_data = common.read_from_file(equalWt_file)
    del equalWt_data[0]
    row_data = equalWt_data[0].split(',')
    line_data = 'Equal Weighted,1.0,' + row_data[3] + ',,' + row_data[4]
    file_data.append(line_data)

    summary_file = os.path.join('output', 'summary.csv')
    common.write_to_file(summary_file, file_data)
def batch_work(ori, csvFile):
    for ii in range(len(ori)):
        print("start: %d" % ii)
        df = common.read_from_file(SOURCE_IMG_FILE_FOLDER + ori[ii] + csvFile[ii])
        # Write results to a new file instead of rebinding the csvFile
        # argument, which would break csvFile[ii] on later iterations.
        new_csv = SOURCE_IMG_FILE_FOLDER + ori[ii] + csvFile[ii].split(
            '.csv')[0] + '_new.csv'
        glassSet = []
        for row in df.iterrows():
            # Extract important information
            file_name = row[1]['FILE']
            full_path_image_name = SOURCE_IMG_FILE_FOLDER + ori[ii] + file_name
            fullImg = os.path.abspath(full_path_image_name) + '\n'
            roi_x = int(row[1]['FACE_X'])
            roi_y = int(row[1]['FACE_Y'])
            roi_w = int(row[1]['FACE_WIDTH'])
            roi_h = int(row[1]['FACE_HEIGHT'])
            orc_img = cv2.imread(os.path.abspath(full_path_image_name))
            ori_img = orc_img[roi_y:roi_y + roi_h, roi_x:roi_x + roi_w, :]
            img = preprocess(ori_img)
            img = img.astype(np.float32)
            img = img.transpose((2, 0, 1))
            net.blobs['data'].data[...] = img
            out = net.forward()
            glasses = postprocess(ori_img, out)
            print(glasses)
            glassSet.append(glasses)
        df['BOOLGLASS'] = glassSet
        df.to_csv(new_csv, mode='a', index=False)
def retrieve_feature_by_vgg_face(facial_image_path, feature_file_path):
    """Retrieve the deep feature by using vgg face.

    :param facial_image_path: the path of the facial image
    :type facial_image_path: string
    :param feature_file_path: the path of the feature file
    :type feature_file_path: string
    :return: the deep feature
    :rtype: numpy array
    """
    try:
        # Read feature directly from file
        if os.path.isfile(feature_file_path):
            feature = common.read_from_file(feature_file_path)
            return feature

        # Retrieve feature
        assert os.path.isfile(facial_image_path)
        facial_image = cv2.imread(facial_image_path)
        facial_image = cv2.resize(facial_image,
                                  dsize=(common.VGG_FACE_IMAGE_SIZE,
                                         common.VGG_FACE_IMAGE_SIZE))
        facial_image = facial_image.astype(np.float32)
        _ = net.predict([facial_image], oversample=False).ravel()
        feature = net.blobs["fc7"].data[0]

        # Successful case. Save feature to file.
        assert feature is not None
        common.write_to_file(feature_file_path, feature)
        return feature
    except:
        # Failure case
        return None
def load_feature_from_file(image_paths, facial_image_extension, feature_extension):
    """Load feature from file.

    :param image_paths: the file paths of the images
    :type image_paths: list
    :param facial_image_extension: the extension of the facial images
    :type facial_image_extension: string
    :param feature_extension: the extension of the feature files
    :type feature_extension: string
    :return: the features
    :rtype: list
    """
    feature_list = []
    feature_file_paths = [image_path + facial_image_extension + feature_extension
                          for image_path in image_paths]
    for feature_file_path in feature_file_paths:
        # Read feature directly from file
        if os.path.isfile(feature_file_path):
            feature = common.read_from_file(feature_file_path)
            feature_list.append(feature)
        else:
            feature_list.append(None)
    return feature_list
def generate_batch_fpoint_from_csv():
    ori = ((common.ORI_BATCH1, common.PROCESSED_BATCH1),
           (common.ORI_BATCH2, common.PROCESSED_BATCH2),
           (common.ORI_BATCH3, common.PROCESSED_BATCH3))
    for pairs in ori:
        df = common.read_from_file(pairs[0] + pairs[0].split('/')[1] +
                                   CSV_FILE_NAME)
        fpfilename = pairs[0].split('/')[1] + '_fpoint.txt'
        outfile = open(fpfilename, 'w')
        for row in df.iterrows():
            # Extract important information
            face_x = float(row[1]['FACE_X'])
            face_y = float(row[1]['FACE_Y'])
            P0_x = float(row[1]['P8X'])
            P0_y = float(row[1]['P8Y'])
            P1_x = float(row[1]['P11X'])
            P1_y = float(row[1]['P11Y'])
            P2_x = float(row[1]['P15X'])
            P2_y = float(row[1]['P15Y'])
            P3_x = float(row[1]['P18X'])
            P3_y = float(row[1]['P18Y'])
            P4_x = float(row[1]['P20X'])
            P4_y = float(row[1]['P20Y'])
            line = str(P0_x) + ' ' + str(P0_y) + ' ' + str(P1_x) + ' ' + str(
                P1_y) + ' ' + str(P2_x) + ' ' + str(P2_y) + ' ' + str(
                    P3_x) + ' ' + str(P3_y) + ' ' + str(P4_x) + ' ' + str(
                        P4_y) + '\n'
            outfile.writelines(line)
        outfile.close()
    print 'Done!'
def batch_work():
    ori = ((common.ORI_BATCH1, common.PROCESSED_BATCH1),
           (common.ORI_BATCH2, common.PROCESSED_BATCH2),
           (common.ORI_BATCH3, common.PROCESSED_BATCH3))
    for pairs in ori:
        df = common.read_from_file(pairs[0] + pairs[0].split('/')[1] +
                                   CSV_FILE_NAME)
        for row in df.iterrows():
            # Extract important information
            file_name = row[1]['FILE']
            roi_x = int(row[1]['FACE_X'])
            roi_y = int(row[1]['FACE_Y'])
            roi_w = int(row[1]['FACE_WIDTH'])
            roi_h = int(row[1]['FACE_HEIGHT'])

            # Create dir if it does not exist
            file_name = file_name.strip()
            dir_name = file_name.split('/')[0]
            if not os.path.isdir(pairs[1] + dir_name):
                os.makedirs(pairs[1] + dir_name)

            # Crop and resize image
            img = cv2.imread(pairs[0] + file_name)
            img_roi = img[roi_y:(roi_y + roi_h + 1), roi_x:(roi_x + roi_w + 1)]
            img_roi_resize = cv2.resize(img_roi, (roi_w, roi_h))
            cv2.imwrite(pairs[1] + file_name, img_roi_resize)
            LOG.info('Process %s Done!' % (file_name))
def get_subscriptions():
    try:
        content = read_from_file(SUBSCRIPTION_FILE)
        lines = content.split('\n')
        for line in lines:
            data = line.split('\t')
            if len(data) == 2:
                if data[1].startswith('tt'):
                    tv_show_name = data[0]
                    tv_show_imdb = data[1]
                    tv_show_mode = "strm tv show dialog"
                    create_tv_show_strm_files(tv_show_name, tv_show_imdb,
                                              tv_show_mode, TV_SHOWS_PATH)
                else:
                    name = data[0]
                    mode = data[1]
                    items = get_menu_items(name, mode, "", "")
                    for (url, li, isFolder) in items:
                        paramstring = url.replace(sys.argv[0], '')
                        params = get_params(paramstring)
                        movie_name = urllib.unquote_plus(params["name"])
                        movie_data = urllib.unquote_plus(params["name"])
                        movie_imdb = urllib.unquote_plus(params["imdb_id"])
                        movie_mode = "strm movie dialog"
                        create_strm_file(movie_name, movie_data, movie_imdb,
                                         movie_mode, MOVIES_PATH)
    except:
        xbmc.log("[What the Furk] Failed to fetch subscription")
def read_rollback_info(rollback_file_path):
    """Search the current directory for the latest rollback-info_* file.

    Return the content as a deployment list.
    """
    archives = []
    lines = common.read_from_file(rollback_file_path)
    for line in lines:
        (name, runtime_name, server_group_name) = line.split()
        server_group = None
        if server_group_name != "None":
            server_group = ServerGroup({
                "name": server_group_name,
                "profile": "",
                "socket-binding-group": "",
                "socket-binding-port-offset": "",
                "deployment": {}
            })
        archives.append(
            Deployment({
                "name": name,
                "runtime-name": runtime_name,
                "enabled": False
            }, server_group=server_group))
    return archives
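# A minimal sketch (not part of the original source) of the rollback-info
# line format the parser above expects: three whitespace-separated fields,
# "<name> <runtime-name> <server-group|None>". The sample lines below are
# hypothetical.
def _rollback_info_format_demo():
    sample_lines = [
        "app1 app1-1.0.war cluster_group",
        "app2 app2-2.1.war None",
    ]
    for sample_line in sample_lines:
        name, runtime_name, server_group_name = sample_line.split()
        if server_group_name == "None":
            server_group_name = None
        print("%s %s %s" % (name, runtime_name, server_group_name))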
def retrieve_facial_image_by_bbox(full_image_path, force_continue=True):
    """Retrieve the facial image by using bbox coordinates.

    :param full_image_path: the path of the full image
    :type full_image_path: string
    :param force_continue: unused argument, for consistency with other functions
    :type force_continue: boolean
    :return: the facial image
    :rtype: numpy array
    """
    try:
        # Read the coordinates of facial image from the bbox file
        bbox_file_path = full_image_path + common.BBOX_EXTENSION
        y, x, w, h = common.read_from_file(bbox_file_path)
        x_start = int(x)
        x_end = int(x + h)
        y_start = int(y)
        y_end = int(y + w)

        # Generate the resized facial image
        full_image = cv2.imread(full_image_path)
        facial_image = full_image[x_start:x_end, y_start:y_end, :]
        facial_image = cv2.resize(facial_image,
                                  dsize=(common.FACIAL_IMAGE_SIZE,
                                         common.FACIAL_IMAGE_SIZE))

        # Successful case
        assert facial_image is not None
        return facial_image
    except:
        # Failure case
        return None
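# Hedged illustration (values made up) of the bbox convention assumed above:
# the file is unpacked as (y, x, w, h), and the crop spans image rows
# x..x+h and columns y..y+w, i.e. x/h select rows while y/w select columns.
def _bbox_convention_demo():
    y, x, w, h = 30.0, 50.0, 140.0, 120.0   # hypothetical bbox file contents
    print(int(x), int(x + h))               # row range of the crop
    print(int(y), int(y + w))               # column range of the crop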
def generate_frames_fpoint_from_csv():
    df = common.read_from_file('umdfaces_videos_ultraface.csv')
    fpfilename = 'umdfaces_frames_fpoint.txt'
    outfile = open(fpfilename, 'w')
    for row in df.iterrows():
        # Extract important information
        face_x = float(row[1]['FACE_X'])
        face_y = float(row[1]['FACE_Y'])
        P0_x = float(row[1]['P8X'])
        P0_y = float(row[1]['P8Y'])
        P1_x = float(row[1]['P11X'])
        P1_y = float(row[1]['P11Y'])
        P2_x = float(row[1]['P15X'])
        P2_y = float(row[1]['P15Y'])
        P3_x = float(row[1]['P18X'])
        P3_y = float(row[1]['P18Y'])
        P4_x = float(row[1]['P20X'])
        P4_y = float(row[1]['P20Y'])
        line = str(P0_x) + ' ' + str(P0_y) + ' ' + str(P1_x) + ' ' + str(
            P1_y) + ' ' + str(P2_x) + ' ' + str(P2_y) + ' ' + str(
                P3_x) + ' ' + str(P3_y) + ' ' + str(P4_x) + ' ' + str(
                    P4_y) + '\n'
        outfile.writelines(line)
    outfile.close()
    print 'Done!'
def retrieve_feature_by_open_face(facial_image_path, feature_file_path):
    """Retrieve the deep feature by using open face.

    :param facial_image_path: the path of the facial image
    :type facial_image_path: string
    :param feature_file_path: the path of the feature file
    :type feature_file_path: string
    :return: the deep feature
    :rtype: numpy array
    """
    try:
        # Read feature directly from file
        if os.path.isfile(feature_file_path):
            feature = common.read_from_file(feature_file_path)
            return feature

        # Retrieve feature
        assert os.path.isfile(facial_image_path)
        facial_image_in_BGR = cv2.imread(facial_image_path)
        facial_image_in_BGR = cv2.resize(facial_image_in_BGR,
                                         dsize=(args.imgDim, args.imgDim))
        facial_image_in_RGB = cv2.cvtColor(facial_image_in_BGR,
                                           cv2.COLOR_BGR2RGB)
        feature = net.forward(facial_image_in_RGB)

        # Successful case. Save feature to file.
        assert feature is not None
        common.write_to_file(feature_file_path, feature)
        return feature
    except:
        # Failure case
        return None
def set_tv_show_meta(listitem, imdb_id, path):
    (data_file, poster_file, fanart_file,
     poster_missing, fanart_missing) = _get_meta_paths(imdb_id, path)
    if os.path.isfile(data_file):
        try:
            content = read_from_file(data_file)
            data = content.split('\n')
            title = data[0]
            year = data[1]
            genre = data[2]
            overview = data[3]
            rating = data[4]
            votes = data[5]
            premiered = data[6]
            mpaa = data[7]
            listitem.setProperty("Video", "true")
            listitem.setProperty("IsPlayable", "true")
            listitem.setInfo(type='Video',
                             infoLabels={'title': title,
                                         'year': int(year),
                                         'genre': genre,
                                         'plot': overview,
                                         'rating': float(rating),
                                         'votes': votes,
                                         'premiered': premiered,
                                         'mpaa': mpaa,
                                         'code': imdb_id})
        except:
            pass
    if os.path.isfile(poster_file) and USE_POSTERS:
        listitem.setThumbnailImage(poster_file)
    if os.path.isfile(fanart_file) and USE_FANART:
        listitem.setProperty('fanart_image', fanart_file)
    return listitem
def find_search_query(query):
    try:
        content = read_from_file(SEARCH_FILE)
        lines = content.split('\n')
        index = lines.index(query)
        return index
    except:
        return -1  # Not found
def find_list(query, search_file):
    try:
        content = read_from_file(search_file)
        lines = content.split('\n')
        index = lines.index(query)
        return index
    except:
        return -1
def subscription_index(name, mode):
    try:
        content = read_from_file(SUBSCRIPTION_FILE)
        line = str(name) + '\t' + str(mode)
        lines = content.split('\n')
        index = lines.index(line)
        return index
    except:
        return -1  # Not subscribed
def unsubscribe(name, mode):
    index = subscription_index(name, mode)
    if index >= 0:
        content = read_from_file(SUBSCRIPTION_FILE)
        lines = content.split('\n')
        lines.pop(index)
        s = ''
        for line in lines:
            if len(line) > 0:
                s = s + line + '\n'
        write_to_file(SUBSCRIPTION_FILE, s)
def remove_from_list(list, file):
    index = find_list(list, file)
    if index >= 0:
        content = read_from_file(file)
        lines = content.split('\n')
        lines.pop(index)
        s = ''
        for line in lines:
            if len(line) > 0:
                s = s + line + '\n'
        write_to_file(file, s)
def remove_search_query(query):
    index = find_search_query(query)
    if index >= 0:
        content = read_from_file(SEARCH_FILE)
        lines = content.split('\n')
        lines.pop(index)
        s = ''
        for line in lines:
            if len(line) > 0:
                s = s + line + '\n'
        write_to_file(SEARCH_FILE, s)
def search_menu():
    items = []
    items.append(create_item_tuple('@Search...', 'manual search'))
    if os.path.isfile(SEARCH_FILE):
        s = read_from_file(SEARCH_FILE)
        search_queries = s.split('\n')
        for query in search_queries:
            items.append(create_item_tuple(query, 'manual search', data=query))
    return items
def subscription_imdb(name, url):
    existing = ''
    if os.path.isfile(SUB_IMDB_FILE):
        existing = read_from_file(SUB_IMDB_FILE)
    if os.path.isfile(SUB_FILE):
        s = read_from_file(SUB_FILE)
        show_list = s.split('\n')
        for show in show_list:
            if show != '' and not show in existing:
                dialog = xbmcgui.Dialog()
                menu_texts = []
                menu_data = []
                params = {}
                params["title"] = show
                params["view"] = "simple"
                params["count"] = "10"
                params["title_type"] = "tv_series,mini_series,tv_special"
                url = "%s%s" % ("http://www.imdb.com/search/title?",
                                urllib.urlencode(params))
                body = open_url(url)
                first_show = regex_get_all(body, '<tr class=', '</tr>')
                if len(first_show) == 565:
                    all_td = regex_get_all(first_show, '<td', '</td>')
                    imdb_id = regex_from_to(all_td[1], '/title/', '/')
                else:
                    for f in first_show:
                        all_td = regex_get_all(f, '<td', '</td>')
                        imdb_id = regex_from_to(all_td[1], '/title/', '/')
                        title = regex_from_to(all_td[1], '/">', '</a').replace(
                            "&#x27;", "'") + ' ' + regex_from_to(
                                f, 'year_type">', '</span>')
                        menu_data.append(imdb_id)
                        menu_texts.append(title)
                    menu_id = dialog.select('Select Show', menu_texts)
                    if menu_id < 0:
                        return (None, None)
                    dialog.close()
                    imdb_id = menu_data[menu_id]
                text = "%s<>%s" % (show, imdb_id)
                add_to_list(text, SUB_IMDB_FILE)
    else:
        notification('My Subscriptions', 'No new shows found', '3000', iconart)
    get_subscriptions(name, url)
def get_subscriptions(name, url):
    content = read_from_file(SUB_IMDB_FILE)
    lines = content.split('\n')
    for line in lines:
        data = line.split('<>')
        if len(data) == 2:
            tv_show_name = clean_file_name(data[0])
            tv_show_imdb = data[1]
            tv_show_mode = "3"
            create_tv_show_strm_files(tv_show_name, tv_show_imdb,
                                      tv_show_mode, TV_SHOWS_PATH)
    xbmc.executebuiltin('UpdateLibrary(video)')
def subscription_menu():
    items = []
    s = read_from_file(SUBSCRIPTION_FILE)
    menu_items = s.split('\n')
    for menu_item in menu_items:
        if len(menu_item) < 3:
            break
        data = menu_item.split('\t')
        item_name = data[0]
        item_data = data[1]
        items.append(create_item_tuple('%s [%s]' % (item_name, item_data),
                                       'unsubscribe', data=item_data,
                                       isFolder=False))
    return items
def repositories(addon_name):
    dialog = xbmcgui.Dialog()
    list = []
    repo_list = []
    dp = xbmcgui.DialogProgress()
    dp.create('Repository')
    directories = os.listdir(check_path)
    count = 0
    for d in directories:
        if d.startswith('repository'):
            addonpath = os.path.join(check_path, d)
            percent = 25
            dp.update(percent, "Scanning installed repositories")
            for file in glob.glob(os.path.join(addonpath, "addon.xml")):
                text = read_from_file(file)
                repo_url = regex_from_to(text, '<datadir zip="true">',
                                         '</datadir>')
                req = urllib2.Request(repo_url)
                percent = 50
                dp.update(percent, "Fetching repository addon information")
                req.add_header('User-Agent',
                               'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; '
                               'rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                try:
                    response = urllib2.urlopen(req)
                    link = response.read()
                    response.close()
                    if repo_url.find('bitbucket') > 0:
                        match = re.compile(
                            'class="pjax-trigger execute"><span class="aui-icon '
                            'aui-icon-small aui-iconfont-devtools-folder-closed">'
                            '</span>(.+?)</a>').findall(link)
                    else:
                        match = re.compile('<a href="(.+?)/">').findall(link)
                    nItem = len(match)
                    for addons in match:
                        if (addons.startswith('plugin')
                                or addons.startswith('script')
                                or addons.startswith('skin')
                                or addons.startswith('metadata')):
                            percent = 75
                            dp.update(percent, "Matching addon to repository")
                            repo_list.append("<<%s/%s>>" % (addons, d))
                except:
                    pass
    if str(repo_list).find(addon_name) > 0:
        repo_name = regex_from_to(str(repo_list), addon_name + "/", ">>")
        repo_path = os.path.join(check_path, repo_name)
        if os.path.exists(repo_path):
            shutil.rmtree(repo_path)
            percent = 100
            dp.update(percent, "Repository removed")
            time.sleep(1)
        else:
            dialog.ok(repo_name, "", "No repository found")
    else:
        # repo_name is undefined on this branch; report against addon_name
        dialog.ok(addon_name, "", "No repository found")
def add_to_list(list, file):
    if find_list(list, file) >= 0:
        return
    if os.path.isfile(file):
        content = read_from_file(file)
    else:
        content = ""
    lines = content.split('\n')
    s = '%s\n' % list
    for line in lines:
        if len(line) > 0:
            s = s + line + '\n'
    write_to_file(file, s)
def add_search_query(query):
    if find_search_query(query) >= 0:
        return
    if os.path.isfile(SEARCH_FILE):
        content = read_from_file(SEARCH_FILE)
    else:
        content = ""
    lines = content.split('\n')
    s = '%s\n' % query
    for line in lines:
        if len(line) > 0:
            s = s + line + '\n'
    write_to_file(SEARCH_FILE, s)
def generate_frames_imagelist():
    csvfilename = '/home/yf/data/umdfaces/umdfaces_videos_ultraface.csv'
    df = common.read_from_file(csvfilename)
    imagelist_name = 'umdfaces_frames_imagelist_tmp.txt'
    imagelist = open(imagelist_name, 'w')
    id = 0
    for row in df.iterrows():
        # Extract important information
        file_name = row[1]['FILE']

        # Create dir if it does not exist
        file_name = file_name.strip()
        dir_name = file_name.split('/')[0]
        image_name = file_name.split(',')[0]
        line_content = image_name + ' ' + str(id) + '\n'
        imagelist.writelines(line_content)
    imagelist.close()
    print 'imagelist done!'
def set_movie_meta(listitem, imdb_id, path):
    (data_file, poster_file, fanart_file,
     poster_missing, fanart_missing) = _get_meta_paths(imdb_id, path)
    if os.path.isfile(data_file):
        try:
            content = read_from_file(data_file)
            data = content.split('\n')
            title = data[0]
            year = data[1]
            genre = data[2]
            tagline = data[3]
            overview = data[4]
            duration = data[5]
            rating = data[6]
            votes = data[7]
            premiered = data[8]
            mpaa = data[9]
            listitem.setProperty("Video", "true")
            listitem.setProperty("IsPlayable", "true")
            listitem.setInfo(type='Video',
                             infoLabels={'title': title,
                                         'year': int(year),
                                         'genre': genre,
                                         'tagline': tagline,
                                         'plot': overview,
                                         'duration': duration,
                                         'rating': float(rating),
                                         'votes': votes,
                                         'premiered': premiered,
                                         'mpaa': mpaa,
                                         'code': imdb_id})
        except:
            pass  # Couldn't add meta for this imdb_id
    if os.path.isfile(poster_file) and USE_POSTERS:
        listitem.setThumbnailImage(poster_file)
    if os.path.isfile(fanart_file) and USE_FANART:
        listitem.setProperty('fanart_image', fanart_file)
    return listitem
def generate_batch_imagelist_from_csv():
    ori = ((common.ORI_BATCH1, common.PROCESSED_BATCH1),
           (common.ORI_BATCH2, common.PROCESSED_BATCH2),
           (common.ORI_BATCH3, common.PROCESSED_BATCH3))
    for pairs in ori:
        df = common.read_from_file(pairs[0] + pairs[0].split('/')[1] +
                                   CSV_FILE_NAME)
        imagelist_name = pairs[0].split('/')[1] + '_imagelist_tmp.txt'
        imagelist = open(imagelist_name, 'w')
        id = 0
        for row in df.iterrows():
            # Extract important information
            file_name = row[1]['FILE']

            # Create dir if it does not exist
            file_name = file_name.strip()
            dir_name = file_name.split('/')[0]
            image_name = file_name.split(',')[0]
            line_content = image_name + ' ' + str(id) + '\n'
            imagelist.writelines(line_content)
            LOG.info('Process %s Done!' % (file_name))
        imagelist.close()
    print 'imagelist done!'
def set_movie_meta(listitem, imdb_id, path):
    (data_file, poster_file, fanart_file,
     poster_missing, fanart_missing) = _get_meta_paths(imdb_id, path)
    if os.path.isfile(data_file):
        try:
            content = read_from_file(data_file)
            data = content.split('\n')
            title = data[0]
            year = data[1]
            genre = data[2]
            tagline = data[3]
            overview = data[4]
            duration = data[5]
            rating = data[6]
            votes = data[7]
            premiered = data[8]
            mpaa = data[9]
            listitem.setInfo('video',
                             {'title': title,
                              'year': int(year),
                              'genre': genre,
                              'tagline': tagline,
                              'plot': overview,
                              'duration': duration,
                              'rating': float(rating),
                              'votes': votes,
                              'premiered': premiered,
                              'mpaa': mpaa,
                              'code': imdb_id})
        except:
            pass  # Couldn't add meta for this imdb_id
    if os.path.isfile(poster_file) and USE_POSTERS:
        listitem.setThumbnailImage(poster_file)
    if os.path.isfile(fanart_file) and USE_FANART:
        listitem.setProperty('fanart_image', fanart_file)
    return listitem
def run(nav_file):
    """
    Generates monthly sharpe ratio for each fund using a rolling window
    of the last 12 months. Uses this data to generate a rank file that
    specifies which fund to invest in each month. The fund chosen each
    month is the one with the highest sharpe ratio.
    """
    # create data directory
    common.create_dir(data_dir)

    # read nav data
    nav_data = common.read_from_file(nav_file)

    # generate monthly sharpe ratio
    sharpe_data = get_sharpe_data(nav_data)
    sharpe_data_file = os.path.join(data_dir, sharpe_data_file_name)
    common.write_to_file(sharpe_data_file, sharpe_data)

    # generate sharpe ranking
    sharpe_rank_data = get_sharpe_rank_data(nav_data, sharpe_data)
    sharpe_rank_data_file = os.path.join(data_dir, sharpe_rank_file_name)
    common.write_to_file(sharpe_rank_data_file, sharpe_rank_data)
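# A minimal sketch, assuming the conventional Sharpe definition, of what
# common.get_sharpe_ratio likely computes for a window of returns:
# mean excess return over the risk-free rate, divided by the standard
# deviation of the returns. Illustrative only, not the project's actual
# implementation.
def sharpe_ratio_sketch(returns, rf_rate):
    excess = [r - rf_rate for r in returns]
    return numpy.mean(excess) / numpy.std(returns)

# sharpe_ratio_sketch([0.10, 0.12, 0.08, 0.11], rf_rate=0.06) -> ~2.73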
def find_shows(name, url):
    plugins = os.listdir(USERDATA)
    y = read_from_file(yrlist)
    yr_list = y.split('\n')
    for addons in plugins:
        addon_path = os.path.join(USERDATA, addons)
        if 'plugin.video.' in addon_path:
            subdirpath = os.listdir(addon_path)
            for s in subdirpath:
                if 'show' in s.lower():
                    show_path = os.path.join(addon_path, s)
                    subshowpath = os.listdir(show_path)
                    for season in subshowpath:
                        season_path = os.path.join(show_path, season)
                        text = season.replace('_', ' ').replace(' s ', ' ').rstrip()
                        for yr in yr_list:
                            if yr in text:
                                text = text.replace(yr, '').rstrip()
                        if text.find('(') > -1:
                            text = text[:text.find('(')].rstrip()
                        add_to_list(text.lower(), SUB_FILE)
    subscription_imdb(name, url)
def read_server_group_mapping(mapping_file):
    """Given a mapping file path, read it and return a dict with its contents.

    Arguments:
    mapping_file -- the path to the mapping file (e.g. /tmp/mapping.properties)

    The mapping file name is not relevant. The content of the file should be
    in the format runtime_name=server_group. One per line, just like a java
    properties file. E.g.:

    app1=cluster_group
    app2=web_group
    """
    mapping = {}
    if os.path.isfile(mapping_file):
        lines = common.read_from_file(mapping_file)
        for line in lines:
            raw_mapping = line.strip().split("=")
            if len(raw_mapping) < 2:
                continue
            (runtime_name, server_group_name) = raw_mapping
            mapping[runtime_name] = server_group_name
    return mapping
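# Usage sketch for read_server_group_mapping. The file path and contents are
# hypothetical, matching the properties format in the docstring, and this
# assumes common.read_from_file yields the file's lines.
def _mapping_usage_demo():
    with open("/tmp/mapping.properties", "w") as f:
        f.write("app1=cluster_group\napp2=web_group\n")
    mapping = read_server_group_mapping("/tmp/mapping.properties")
    print(mapping)  # expected: {'app1': 'cluster_group', 'app2': 'web_group'}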
def receive_signal(signum, stack):
    data = read_from_file(other_data_file)
    print(f'Received from app2 data: {data}')
    out_data = input('What do you want to send?')
    send_data(data_file, out_data, other_pid, send_signal)
def retrieve_facial_image_by_congealingcomplex(full_image_path, force_continue=True):
    """Retrieve the facial image by using congealingcomplex.

    :param full_image_path: the path of the full image
    :type full_image_path: string
    :param force_continue: whether crop facial images by using bbox coordinates
    :type force_continue: boolean
    :return: the facial image
    :rtype: numpy array
    """

    def call_congealingcomplex(facial_image):
        """Call congealingcomplex to perform face frontalization.

        :param facial_image: the facial image
        :type facial_image: numpy array
        :return: the processed facial image
        :rtype: numpy array
        """
        input_image_path = os.path.join("/tmp", "input_image.jpg")
        output_image_path = os.path.join("/tmp", "output_image.jpg")
        cv2.imwrite(input_image_path, facial_image)
        input_image_info_path = os.path.join("/tmp", "input_image.txt")
        output_image_info_path = os.path.join("/tmp", "output_image.txt")
        with open(input_image_info_path, "w") as text_file:
            text_file.write("{}\n".format(input_image_path))
        with open(output_image_info_path, "w") as text_file:
            text_file.write("{}\n".format(output_image_path))
        subprocess.call([os.path.join(common.CONGEALINGCOMPLEX_PATH, "funnelReal"),
                         input_image_info_path,
                         os.path.join(common.CONGEALINGCOMPLEX_PATH, "people.train"),
                         output_image_info_path])

        # Read the processed facial image
        processed_facial_image = cv2.imread(output_image_path)

        # Omit the totally black rows and columns
        gray_processed_facial_image = cv2.cvtColor(processed_facial_image,
                                                   cv2.COLOR_BGR2GRAY)
        cumsum_in_row = np.cumsum(gray_processed_facial_image, axis=1)
        valid_row_indexes = cumsum_in_row[:, -1] > 0
        cumsum_in_column = np.cumsum(gray_processed_facial_image, axis=0)
        valid_column_indexes = cumsum_in_column[-1, :] > 0
        return processed_facial_image[valid_row_indexes, :, :][:, valid_column_indexes, :]

    try:
        # Read the coordinates of facial image from the bbox file
        bbox_file_path = full_image_path + common.BBOX_EXTENSION
        y, x, w, h = common.read_from_file(bbox_file_path)

        # Find the middle point of the bounding rectangle
        x_middle = x + 0.5 * h
        y_middle = y + 0.5 * w

        # Make the bounding square a little bit larger
        x_start = int(x_middle - 0.8 * h)
        x_end = int(x_middle + 0.8 * h)
        y_start = int(y_middle - 0.8 * w)
        y_end = int(y_middle + 0.8 * w)

        # Retrieve the original facial image
        full_image = cv2.imread(full_image_path)
        facial_image = full_image[max(x_start, 0):min(x_end, full_image.shape[0]),
                                  max(y_start, 0):min(y_end, full_image.shape[1]), :]

        # Call congealingcomplex and resize it
        facial_image = call_congealingcomplex(facial_image)
        facial_image = cv2.resize(facial_image,
                                  dsize=(common.FACIAL_IMAGE_SIZE,
                                         common.FACIAL_IMAGE_SIZE))

        # Successful case
        assert facial_image is not None
        return facial_image
    except:
        # Failure case
        if force_continue:
            return retrieve_facial_image_by_bbox(full_image_path)
        else:
            return None
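# Self-contained sketch of the cumulative-sum trick used above to drop the
# all-black border rows and columns: a row (or column) is kept only when the
# last entry of its cumulative sum, i.e. its total pixel sum, is positive.
# The toy array below is illustrative.
def _cumsum_trim_demo():
    gray = np.array([[0, 0, 0],
                     [0, 5, 9],
                     [0, 0, 0]])
    valid_rows = np.cumsum(gray, axis=1)[:, -1] > 0     # per-row total > 0
    valid_cols = np.cumsum(gray, axis=0)[-1, :] > 0     # per-column total > 0
    print(gray[valid_rows, :][:, valid_cols])           # -> [[5 9]]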
def batch_work(ori, setFile):
    setfile_ = open(setFile, 'w')
    for annofile in ori:
        df = common.read_from_file(SOURCE_IMG_FILE_FOLDER + annofile +
                                   annoDict[annofile])
        for row in df.iterrows():
            # Extract important information
            file_name = row[1]['FILE']
            img_file_name_no_jpg = file_name.split('/')[1].split('.jpg')[0]
            label_full_anno_file_name = LABEL_FILE_FOLDER + \
                img_file_name_no_jpg + '.txt'
            full_path_image_name = SOURCE_IMG_FILE_FOLDER + annofile + file_name
            if not os.path.exists(CROP_IMG_FILE_FOLDER + annofile +
                                  file_name.split('/')[0]):
                os.mkdir(CROP_IMG_FILE_FOLDER + annofile +
                         file_name.split('/')[0])
            ang_path_image_name = CROP_IMG_FILE_FOLDER + annofile + \
                file_name.split('.jpg')[0] + '_crop.jpg'
            fullImg = os.path.abspath(full_path_image_name) + '\n'
            print('label file: %s, and full_path_img : %s' %
                  (label_full_anno_file_name, full_path_image_name))
            label_file_ = open(label_full_anno_file_name, 'w')
            roi_x = int(row[1]['FACE_X'])
            roi_y = int(row[1]['FACE_Y'])
            roi_w = int(row[1]['FACE_WIDTH'])
            roi_h = int(row[1]['FACE_HEIGHT'])
            yaw = row[1]['YAW']
            pitch = row[1]['PITCH']
            roll = row[1]['ROLL']
            pr_female = row[1]['PR_FEMALE']
            pr_male = row[1]['PR_MALE']
            boolGlass = row[1]['BOOLGLASS']
            src = cv2.imread(os.path.abspath(full_path_image_name))

            # Crop the face ROI with a margin, clamped to the image borders
            xmin = np.maximum(roi_x - minMargin / 2, 0)
            xmax = np.minimum(roi_x + roi_w + minMargin / 2, src.shape[1])
            ymin = np.maximum(roi_y - minMargin / 2, 0)
            ymax = np.minimum(roi_y + roi_h + minMargin / 2, src.shape[0])
            cropRoi = src[ymin:ymax, xmin:xmax, :]

            # Landmark coordinates relative to the crop
            left_eye_point_x = row[1]['P8X'] - xmin
            right_eye_point_x = row[1]['P11X'] - xmin
            nose_point_x = row[1]['P15X'] - xmin
            left_mouse_point_x = row[1]['P18X'] - xmin
            right_mouse_point_x = row[1]['P20X'] - xmin
            left_eye_point_y = row[1]['P8Y'] - ymin
            right_eye_point_y = row[1]['P11Y'] - ymin
            nose_point_y = row[1]['P15Y'] - ymin
            left_mouse_point_y = row[1]['P18Y'] - ymin
            right_mouse_point_y = row[1]['P20Y'] - ymin

            # Skip faces with poorly visible or implausible landmarks
            vision_left_eye = row[1]['VIS8']
            vision_right_eye = row[1]['VIS11']
            vision_nose = row[1]['VIS15']
            vision_left_mouth = row[1]['VIS18']
            vision_right_mouth = row[1]['VIS20']
            if np.min([vision_left_eye, vision_right_eye, vision_nose,
                       vision_left_mouth, vision_right_mouth]) < vision_threold:
                continue
            if nose_point_y < np.min([left_eye_point_y, right_eye_point_y]):
                continue
            cv2.imwrite(ang_path_image_name, cropRoi)

            # Draw the five landmarks on a debug copy of the crop
            pointSet = []
            pointSet.append((int(left_eye_point_x), int(left_eye_point_y)))
            pointSet.append((int(right_eye_point_x), int(right_eye_point_y)))
            pointSet.append((int(nose_point_x), int(nose_point_y)))
            pointSet.append((int(left_mouse_point_x), int(left_mouse_point_y)))
            pointSet.append((int(right_mouse_point_x), int(right_mouse_point_y)))
            for ii in range(5):
                cv2.circle(cropRoi, pointSet[ii], 3, (0, 0, 213), -1)
            ang_path_image_name_new = CROP_IMG_FILE_FOLDER_DRAW + \
                img_file_name_no_jpg + '_crop.jpg'
            cv2.imwrite(ang_path_image_name_new, cropRoi)

            setfile_.writelines(os.path.abspath(ang_path_image_name) + '\n')
            content = str(left_eye_point_x) + ' ' + str(right_eye_point_x) + \
                ' ' + str(nose_point_x) + ' ' + str(left_mouse_point_x) + \
                ' ' + str(right_mouse_point_x) + ' ' + str(left_eye_point_y) + \
                ' ' + str(right_eye_point_y) + ' ' + str(nose_point_y) + \
                ' ' + str(left_mouse_point_y) + ' ' + str(right_mouse_point_y) + \
                ' ' + str(yaw) + ' ' + str(pitch) + ' ' + str(roll) + \
                ' ' + str(pr_female) + ' ' + str(pr_male) + \
                ' ' + str(boolGlass) + '\n'
            label_file_.write(content)
            label_file_.close()
    setfile_.close()
def run(nav_file, ma_type):
    nav_data = common.read_from_file(nav_file)
    fund_names = nav_data[0].split(',')[1:]
    del nav_data[1:7]
    ma_data = get_ma_data(nav_data)
    del nav_data[0:7]

    cashflows = common.init_array_dict(fund_names)
    fund_inv_dict = common.init_dict(fund_names)
    last_inv_dict = common.init_dict(fund_names, default_inv)
    returns_halfyr = common.init_array_dict(fund_names)
    returns_annual = common.init_array_dict(fund_names)
    units_dict_halfyr = common.init_dict(fund_names)
    units_dict_annual = common.init_dict(fund_names)
    units_dict_overall = common.init_dict(fund_names)

    cnt = len(nav_data)
    max_total_inv = default_inv * (cnt - 1)
    for i in xrange(0, cnt):
        row_data = nav_data[i].split(',')
        dt = datetime.strptime(row_data[0], '%d-%m-%Y')
        fund_nav = row_data[1:]
        fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)

        # half-yearly returns for each fund
        if i % 6 == 0 and i > 0:
            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)
            for fund in fund_names:
                cashflows_halfyr = cashflows[fund][i-6:i]  # slice last 6 months cashflows
                if is_cashflow_missing(cashflows_halfyr):
                    continue
                cf = (dt, wealth[fund])
                cashflows_halfyr.append(cf)
                ret = common.xirr(cashflows_halfyr)
                returns_halfyr[fund].append(ret)
            # clean up for next pass
            units_dict_halfyr = common.init_dict(fund_names)

        # annual returns for each fund
        if i % 12 == 0 and i > 0:
            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)
            for fund in fund_names:
                cashflows_annual = cashflows[fund][i-12:i]  # slice last 12 months cashflows
                if is_cashflow_missing(cashflows_annual):
                    continue
                cf = (dt, wealth[fund])
                cashflows_annual.append(cf)
                ret = common.xirr(cashflows_annual)
                returns_annual[fund].append(ret)
            # clean up for next pass
            units_dict_annual = common.init_dict(fund_names)

        # no investment on the last date
        if i == cnt - 1:
            break

        for f in fund_names:
            # cap total investment
            allowed_inv = max_total_inv - fund_inv_dict[f]
            prev_inv = last_inv_dict[f]
            nav = fund_nav_dict[f]
            ma = ma_data[f][i]
            mnt_inv = get_mnt_inv(ma_type, prev_inv, nav, ma)
            mnt_inv = min(mnt_inv, allowed_inv)
            units = mnt_inv / nav
            units_dict_overall[f] += units
            units_dict_halfyr[f] += units
            units_dict_annual[f] += units
            last_inv_dict[f] = mnt_inv
            fund_inv_dict[f] += mnt_inv
            cf = (dt, -mnt_inv)
            cashflows[f].append(cf)

    file_data = []
    header_line = \
        'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \
        'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \
        'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'
    file_data.append(header_line)

    # final wealth
    nav_line = nav_data[cnt - 1].split(',')[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
    wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)

    # performance stats for each fund
    last_date = nav_data[cnt - 1].split(',')[0]
    dt = datetime.strptime(last_date, '%d-%m-%Y')
    for fund in sorted(fund_names):
        fund_cashflows = cashflows[fund][:]
        cf = (dt, wealth[fund])
        fund_cashflows.append(cf)
        fund_inv = fund_inv_dict[fund]
        abs_return = ((wealth[fund] / fund_inv) - 1)
        ann_return = common.xirr(fund_cashflows)
        hfr = returns_halfyr[fund]
        halfyr_rf_rate = common.get_rf_rate('half-yearly')
        halfyr_return_mean = numpy.mean(hfr)
        halfyr_return_std = numpy.std(hfr)
        halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)
        afr = returns_annual[fund]
        annual_rf_rate = common.get_rf_rate('annual')
        annual_return_mean = numpy.mean(afr)
        annual_return_std = numpy.std(afr)
        annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)
        line_data = \
            fund + ',' + str(fund_inv) + ',' + str(wealth[fund]) + ',' + \
            str(abs_return) + ',' + str(ann_return) + ',' + \
            str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \
            str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \
            str(annual_return_std) + ',' + str(annual_sharpe)
        file_data.append(line_data)

    ma_file_name = 'ma_' + ma_type + '.csv'
    ma_file = os.path.join(data_dir, ma_file_name)
    common.write_to_file(ma_file, file_data)
def run(nav_file, rank_file):
    """
    Generates return statistics based on sharpe ratio ranking data.
    """
    # create data directory
    common.create_dir(data_dir)

    # read data files
    nav_data = common.read_from_file(nav_file)
    rank_data = common.read_from_file(rank_file)

    # remove redundant entries in nav_data
    target_date = rank_data[1].split(',')[0]
    common.trim_data(nav_data, target_date)
    assert len(nav_data) == len(rank_data)

    # retrieve fund names
    # the first column (date) is skipped
    fund_names = nav_data[0].split(',')[1:]

    # initialize
    cashflows_halfyr = []
    cashflows_annual = []
    cashflows_overall = []
    returns_halfyr = []
    returns_annual = []
    units_dict_halfyr = common.init_dict(fund_names)
    units_dict_annual = common.init_dict(fund_names)
    units_dict_overall = common.init_dict(fund_names)

    # remove header line
    del nav_data[0]
    del rank_data[0]

    # compute cashflows and returns
    cnt = len(nav_data)
    for i in range(0, cnt):
        (date, fund, nav) = rank_data[i].split(',')
        dt = datetime.strptime(date, '%d-%m-%Y')

        # half-yearly returns
        if i % 6 == 0 and i > 0:
            nav_line = nav_data[i].split(',')[1:]
            fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
            wealth = get_wealth(fund_nav_dict, units_dict_halfyr)
            cf = (dt, wealth)
            cashflows_halfyr.append(cf)
            ret = common.xirr(cashflows_halfyr)
            returns_halfyr.append(ret)
            # clean up for next pass
            del cashflows_halfyr[:]
            units_dict_halfyr = common.init_dict(fund_names)

        # annual returns
        if i % 12 == 0 and i > 0:
            nav_line = nav_data[i].split(',')[1:]
            nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
            wealth = get_wealth(nav_dict, units_dict_annual)
            cf = (dt, wealth)
            cashflows_annual.append(cf)
            ret = common.xirr(cashflows_annual)
            returns_annual.append(ret)
            # clean up for next pass
            del cashflows_annual[:]
            units_dict_annual = common.init_dict(fund_names)

        # no investment on the last date
        if i == cnt - 1:
            break

        # units invested
        units = mnt_inv / float(nav)
        units_dict_halfyr[fund] += units
        units_dict_annual[fund] += units
        units_dict_overall[fund] += units

        # cash outflow
        cf = (dt, -mnt_inv)
        cashflows_halfyr.append(cf)
        cashflows_annual.append(cf)
        cashflows_overall.append(cf)

    file_data = []

    # investment details
    file_data.append('Investment Details')
    file_data.append('Fund,Units')
    for f in units_dict_overall:
        if units_dict_overall[f] > 0:
            line_data = f + ',' + str(units_dict_overall[f])
            file_data.append(line_data)
    file_data.append('\n')

    # total investment
    num_inv = len(cashflows_overall)
    total_inv = num_inv * mnt_inv
    file_data.append('Investment,' + str(total_inv))

    # final wealth
    nav_line = nav_data[cnt - 1].split(',')[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
    wealth = get_wealth(fund_nav_dict, units_dict_overall)
    file_data.append('Wealth,' + str(wealth))

    # absolute return
    abs_return = ((wealth / total_inv) - 1)
    file_data.append('Absolute Return,' + str(abs_return))

    # annualized return
    last_date = nav_data[cnt - 1].split(',')[0]
    dt = datetime.strptime(last_date, '%d-%m-%Y')
    cf = (dt, wealth)
    cashflows_overall.append(cf)
    annual_return = common.xirr(cashflows_overall)
    file_data.append('Annualized Return,' + str(annual_return))
    file_data.append('\n')

    file_data.append('Stats,Mean,Std Deviation,Sharpe Ratio')

    # half-yearly return stats
    halfyr_rf_rate = common.get_rf_rate('half-yearly')
    halfyr_mean = numpy.mean(returns_halfyr)
    halfyr_stdev = numpy.std(returns_halfyr)
    halfyr_sharpe = common.get_sharpe_ratio(returns_halfyr, halfyr_rf_rate)
    file_data.append('Half-Yearly,' + str(halfyr_mean) + ',' +
                     str(halfyr_stdev) + ',' + str(halfyr_sharpe))

    # annual return stats
    annual_rf_rate = common.get_rf_rate('annual')
    annual_mean = numpy.mean(returns_annual)
    annual_stdev = numpy.std(returns_annual)
    annual_sharpe = common.get_sharpe_ratio(returns_annual, annual_rf_rate)
    file_data.append('Annual,' + str(annual_mean) + ',' +
                     str(annual_stdev) + ',' + str(annual_sharpe))

    # save stats to file
    benchmark_file = os.path.join(data_dir, benchmark_file_name)
    common.write_to_file(benchmark_file, file_data)
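# A self-contained sketch of annualizing dated cashflows, assuming
# common.xirr follows the usual XIRR definition: solve NPV(rate) = 0 over
# (datetime, amount) pairs, here by bisection. Illustrative only; the
# project's actual implementation may differ.
def xirr_sketch(cashflows, lo=-0.99, hi=10.0, tol=1e-6):
    t0 = cashflows[0][0]

    def npv(rate):
        return sum(cf / (1.0 + rate) ** ((dt - t0).days / 365.0)
                   for dt, cf in cashflows)

    # NPV decreases in rate for an outflow-then-inflow stream, so bisect
    while hi - lo > tol:
        mid = (lo + hi) / 2.0
        if npv(mid) > 0:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2.0

# xirr_sketch([(datetime(2015, 1, 1), -1000.0),
#              (datetime(2016, 1, 1), 1100.0)]) -> ~0.10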
def run(nav_file, ma_type):
    nav_data = common.read_from_file(nav_file)
    fund_names = nav_data[0].split(',')[1:]
    del nav_data[1:7]
    ma_data = get_ma_data(nav_data)
    del nav_data[0:7]

    cashflows = common.init_array_dict(fund_names)
    fund_inv_dict = common.init_dict(fund_names)
    fund_corpus_dict = common.init_dict(fund_names)
    fund_corpus_index_dict = common.init_array_dict(fund_names)
    last_inv_dict = common.init_dict(fund_names, default_inv)
    returns_halfyr = common.init_array_dict(fund_names)
    returns_annual = common.init_array_dict(fund_names)
    units_dict_halfyr = common.init_dict(fund_names)
    units_dict_annual = common.init_dict(fund_names)
    units_dict_overall = common.init_dict(fund_names)

    cnt = len(nav_data)
    max_total_inv = default_inv * (cnt - 1)
    for i in xrange(0, cnt):
        row_data = nav_data[i].split(',')
        dt = datetime.strptime(row_data[0], '%d-%m-%Y')
        fund_nav = row_data[1:]
        fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)

        # half-yearly returns for each fund
        if i % 6 == 0 and i > 0:
            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)
            for fund in fund_names:
                start_corpus = fund_corpus_index_dict[fund][i - 7]
                end_corpus = fund_corpus_index_dict[fund][i - 1]
                corpus_wealth = end_corpus - start_corpus
                total_wealth = wealth[fund] + corpus_wealth
                cashflows_halfyr = cashflows[fund][i - 6:i]  # slice last 6 months cashflows
                if is_cashflow_missing(cashflows_halfyr):
                    continue
                cf = (dt, total_wealth)
                cashflows_halfyr.append(cf)
                ret = common.xirr(cashflows_halfyr)
                returns_halfyr[fund].append(ret)
            # clean up
            units_dict_halfyr = common.init_dict(fund_names)

        # annual returns for each fund
        if i % 12 == 0 and i > 0:
            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)
            for fund in fund_names:
                start_corpus = fund_corpus_index_dict[fund][i - 13]
                end_corpus = fund_corpus_index_dict[fund][i - 1]
                corpus_wealth = end_corpus - start_corpus
                total_wealth = wealth[fund] + corpus_wealth
                cashflows_annual = cashflows[fund][i - 12:i]  # slice last 12 months cashflows
                if is_cashflow_missing(cashflows_annual):
                    continue
                cf = (dt, wealth[fund] + fund_corpus_dict[fund])
                cashflows_annual.append(cf)
                ret = common.xirr(cashflows_annual)
                returns_annual[fund].append(ret)
            # clean up
            units_dict_annual = common.init_dict(fund_names)

        # no investment on the last date
        if i == cnt - 1:
            break

        for f in fund_names:
            # cap total investment
            allowed_inv = max_total_inv - fund_inv_dict[f]
            prev_inv = last_inv_dict[f]
            nav = fund_nav_dict[f]
            ma = ma_data[f][i]

            # equity investment
            mnt_inv = get_mnt_inv(ma_type, prev_inv, nav, ma)
            mnt_inv = min(mnt_inv, allowed_inv)
            last_inv_dict[f] = mnt_inv
            allowed_inv -= mnt_inv

            # debt investment
            corpus = fund_corpus_dict[f]
            debt_inv = default_inv - mnt_inv
            if debt_inv < 0:
                debt_inv = -min(mnt_inv - default_inv, corpus)
            else:
                debt_inv = min(debt_inv, allowed_inv)

            # corpus investment + interest
            corpus += debt_inv
            interest = corpus * int_rate
            corpus += interest
            fund_corpus_dict[f] = corpus
            fund_corpus_index_dict[f].append(corpus)

            # total investment
            total_inv = mnt_inv + debt_inv
            fund_inv_dict[f] += total_inv

            # invested units
            units = mnt_inv / nav
            units_dict_overall[f] += units
            units_dict_halfyr[f] += units
            units_dict_annual[f] += units

            # cashflows
            cf = (dt, -total_inv)
            cashflows[f].append(cf)

            # debugging
            # if f == 'Birla_Advantage_Fund':
            #     print '%d\t%d\t%d\t%.2f\t%d\t%d' % (mnt_inv, debt_inv, round(fund_inv_dict[f]), units, -total_inv, round(corpus))

    file_data = []
    header_line = \
        'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \
        'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \
        'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'
    file_data.append(header_line)

    # final wealth
    nav_line = nav_data[cnt - 1].split(',')[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
    wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)

    # performance stats for each fund
    last_date = nav_data[cnt - 1].split(',')[0]
    dt = datetime.strptime(last_date, '%d-%m-%Y')
    for fund in sorted(fund_names):
        total_wealth = wealth[fund] + fund_corpus_dict[fund]
        fund_cashflows = cashflows[fund][:]
        cf = (dt, total_wealth)
        fund_cashflows.append(cf)
        fund_inv = fund_inv_dict[fund]
        abs_return = ((total_wealth / fund_inv) - 1)
        ann_return = common.xirr(fund_cashflows)
        hfr = returns_halfyr[fund]
        halfyr_rf_rate = common.get_rf_rate('half-yearly')
        halfyr_return_mean = numpy.mean(hfr)
        halfyr_return_std = numpy.std(hfr)
        halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)
        afr = returns_annual[fund]
        annual_rf_rate = common.get_rf_rate('annual')
        annual_return_mean = numpy.mean(afr)
        annual_return_std = numpy.std(afr)
        annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)
        line_data = \
            fund + ',' + str(fund_inv) + ',' + str(total_wealth) + ',' + \
            str(abs_return) + ',' + str(ann_return) + ',' + \
            str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \
            str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \
            str(annual_return_std) + ',' + str(annual_sharpe)
        file_data.append(line_data)

    ma_file_name = 'ma_with_debt_' + ma_type + '.csv'
    ma_file = os.path.join(data_dir, ma_file_name)
    common.write_to_file(ma_file, file_data)
def run(nav_file):
    # create data directory
    common.create_dir(data_dir)

    # read nav data
    nav_data = common.read_from_file(nav_file)

    # remove first 12 entries in nav_data
    # to compare results with benchmark
    del nav_data[1:13]

    # retrieve fund names
    # the first column (date) is skipped
    fund_names = nav_data[0].split(',')[1:]

    # initialize
    cashflows = []
    returns_halfyr = common.init_array_dict(fund_names)
    returns_annual = common.init_array_dict(fund_names)
    units_dict_halfyr = common.init_dict(fund_names)
    units_dict_annual = common.init_dict(fund_names)
    units_dict_overall = common.init_dict(fund_names)

    # remove header line
    del nav_data[0]

    # compute cashflows and returns
    cnt = len(nav_data)
    for i in range(0, cnt):
        row_data = nav_data[i].split(',')
        dt = datetime.strptime(row_data[0], '%d-%m-%Y')
        fund_nav = row_data[1:]
        fund_nav_dict = common.get_fund_nav_dict(fund_names, fund_nav)

        # half-yearly returns for each fund
        if i % 6 == 0 and i > 0:
            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_halfyr)
            for fund in fund_names:
                cashflows_halfyr = cashflows[i-6:i]  # slice last 6 months cashflows
                cf = (dt, wealth[fund])
                cashflows_halfyr.append(cf)
                ret = common.xirr(cashflows_halfyr)
                returns_halfyr[fund].append(ret)
            # clean up for next pass
            units_dict_halfyr = common.init_dict(fund_names)

        # annual returns for each fund
        if i % 12 == 0 and i > 0:
            wealth = common.get_fund_wealth(fund_nav_dict, units_dict_annual)
            for fund in fund_names:
                cashflows_annual = cashflows[i-12:i]  # slice last 12 months cashflows
                cf = (dt, wealth[fund])
                cashflows_annual.append(cf)
                ret = common.xirr(cashflows_annual)
                returns_annual[fund].append(ret)
            # clean up for next pass
            units_dict_annual = common.init_dict(fund_names)

        # no investment on the last date
        if i == cnt - 1:
            break

        # invested units
        for fund in fund_names:
            nav = fund_nav_dict[fund]
            units = mnt_inv / nav
            units_dict_halfyr[fund] += units
            units_dict_annual[fund] += units
            units_dict_overall[fund] += units

        # cash outflow
        cf = (dt, -mnt_inv)
        cashflows.append(cf)

    file_data = []
    header_line = \
        'Fund,Investment,Wealth,Absolute Return,Annualized Return,' + \
        'Half-Yr Return Mean,Half-Yr Return Std Dev,Half-Yr Sharpe,' + \
        'Annual Return Mean,Annual Return Std Dev,Annual Sharpe'
    file_data.append(header_line)

    # total investment
    num_inv = len(cashflows)
    total_inv = num_inv * mnt_inv

    # final wealth
    nav_line = nav_data[cnt - 1].split(',')[1:]
    fund_nav_dict = common.get_fund_nav_dict(fund_names, nav_line)
    wealth = common.get_fund_wealth(fund_nav_dict, units_dict_overall)

    # performance stats for each fund
    last_date = nav_data[cnt - 1].split(',')[0]
    dt = datetime.strptime(last_date, '%d-%m-%Y')
    for fund in sorted(fund_names):
        fund_cashflows = cashflows[:]
        cf = (dt, wealth[fund])
        fund_cashflows.append(cf)
        abs_return = ((wealth[fund] / total_inv) - 1)
        ann_return = common.xirr(fund_cashflows)
        hfr = returns_halfyr[fund]
        halfyr_rf_rate = common.get_rf_rate('half-yearly')
        halfyr_return_mean = numpy.mean(hfr)
        halfyr_return_std = numpy.std(hfr)
        halfyr_sharpe = common.get_sharpe_ratio(hfr, halfyr_rf_rate)
        afr = returns_annual[fund]
        annual_rf_rate = common.get_rf_rate('annual')
        annual_return_mean = numpy.mean(afr)
        annual_return_std = numpy.std(afr)
        annual_sharpe = common.get_sharpe_ratio(afr, annual_rf_rate)
        line_data = \
            fund + ',' + str(total_inv) + ',' + str(wealth[fund]) + ',' + \
            str(abs_return) + ',' + str(ann_return) + ',' + \
            str(halfyr_return_mean) + ',' + str(halfyr_return_std) + ',' + \
            str(halfyr_sharpe) + ',' + str(annual_return_mean) + ',' + \
            str(annual_return_std) + ',' + str(annual_sharpe)
        file_data.append(line_data)

    regular_sip_file = os.path.join(data_dir, regular_sip_file_name)
    common.write_to_file(regular_sip_file, file_data)
def CATEGORIES():
    af = "test"
    list = []
    encr = ""
    py_list = []

    def check_py_file(d, py_file):
        # Scan one python file for analytics/advert/hidden-code markers and,
        # if any are found, record the addon and add a menu entry. This is the
        # scan that was previously duplicated at every directory depth.
        text = read_from_file(py_file)
        if text.find('google-analytics') > 0 or text.find('GA(') > 0 \
                or text.find('UA-') > 0 or text.find('ADDON_ADVERTISE') > 0 \
                or text.find('OOo') > 0:
            if text.find('google-analytics') > 0 or text.find('GA(') > 0 \
                    or text.find('UA-') > 0:
                gf = ' GA references found, '
            else:
                gf = ' No GA references found '
            if text.find('OOo') > 0:
                af = 'HIDDEN CODE'
            else:
                af = "."
            if text.count('GA(') == 0:
                cnt = '0 events '
            else:
                cnt = "%s %s" % (text.count('GA('), "events ")
            list.append(d)
            addDir('[COLOR cyan]' + d + '[/COLOR]' + gf + cnt + af,
                   d, 2, '', 'list addons', py_file)

    directories = os.listdir(check_path)
    for d in directories:
        if d == "script.module.xbmc.ads":
            addonpath = os.path.join(check_path, d)
            list.append(d)
            addDir('[COLOR cyan]' + d + '[/COLOR]' + " (ADVERTS!)",
                   d, 2, '', 'list addons', d)
        if d != "plugin.video.gachecker":
            addonpath = os.path.join(check_path, d)
            # Apply the same scan at the addon root and up to three levels
            # of subdirectories below it
            for py_file in glob.glob(os.path.join(addonpath, "*.py")):
                check_py_file(d, py_file)
            if os.path.isdir(addonpath):
                for sd in os.listdir(addonpath):
                    subd = os.path.join(check_path, d, sd)
                    for py_file in glob.glob(os.path.join(subd, "*.py")):
                        check_py_file(d, py_file)
                    if os.path.isdir(subd):
                        for sd2 in os.listdir(subd):
                            subd2 = os.path.join(check_path, d, sd, sd2)
                            for py_file in glob.glob(os.path.join(subd2, "*.py")):
                                check_py_file(d, py_file)
                            if os.path.isdir(subd2):
                                for sd3 in os.listdir(subd2):
                                    subd3 = os.path.join(check_path, d, sd, sd2, sd3)
                                    for py_file in glob.glob(os.path.join(subd3, "*.py")):
                                        check_py_file(d, py_file)
    if len(list) == 0:
        addDir("No Google Analytics, ads or hidden code found",
               "", 1, '', 'list addons', '')
def receive_signal(signum, stack):
    data = read_from_file(other_data_file)
    print(f'Received from app1 data: {data}')
    out_data = data[::-1]  # reply with the payload reversed
    print(f'Replying with: {out_data}')
    send_data(data_file, out_data, other_pid, send_signal)
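# Hedged wiring sketch for the two handlers above: each process registers
# receive_signal for a chosen signal, and a send_data helper writes the
# payload to the shared file before signalling the peer. Everything here
# other than receive_signal is an assumption, not the original module's code.
import os
import signal

def send_data_sketch(path, payload, pid, sig):
    with open(path, 'w') as f:   # write payload where the peer will read it
        f.write(payload)
    os.kill(pid, sig)            # then poke the peer process

# signal.signal(signal.SIGUSR1, receive_signal)   # assumed signal choice
# signal.pause()                                  # wait for the peer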