# Shared imports for the scripts below (in the repo each script carries its own
# header). Repo-local helpers such as Timer, DataLoader, ColorPalette,
# hide_spines, stackedBarPlot, plot_ccdf, concise_fmt, smape, gini, switch,
# is_same_genre, smoothing, plot_contour, obj2str, str2obj and the constants
# T, NUM_REL, NUM_REL_15, NUM_REC, NUM_OUTPUT come from the project's utility
# modules, whose paths are not shown in this snapshot.
import json
import os
import pickle
import platform
import re
from collections import Counter, defaultdict
from datetime import datetime, timedelta

import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import FuncFormatter
from powerlaw import Fit
from scipy.stats import percentileofscore, spearmanr


def main():
    timer = Timer()
    timer.start()

    data_prefix = '../data/'

    fig, axes = plt.subplots(ncols=3, nrows=3, figsize=(12, 4), sharex='col')
    gs = axes[0, 0].get_gridspec()
    for ax in axes[:, 0]:
        ax.remove()
    ax_left = fig.add_subplot(gs[:, 0])
    ax_left.set_axis_off()
    ax_left.spines['top'].set_visible(False)
    ax_left.spines['right'].set_visible(False)
    ax_left.spines['bottom'].set_visible(False)
    ax_left.spines['left'].set_visible(False)
    ax_left.set_title('(a)', fontsize=12)
    axes = axes[:, 1:].ravel()

    video_title_list = ['Hello', 'Someone like you', 'Rolling in the deep',
                        'Skyfall', 'Set fire to the rain', 'Hometown glory']

    # == == == == == == Part 1: Load data == == == == == == #
    fig_idx = 0
    with open(os.path.join(data_prefix, 'teaser.json'), 'r') as fin:
        for line in fin:
            video_json = json.loads(line.rstrip())
            daily_view = video_json['insights']['dailyView']
            end_date = datetime.strptime(video_json['insights']['endDate'], '%Y-%m-%d')
            start_date = end_date - timedelta(days=len(daily_view))
            date_axis = [start_date + timedelta(days=t) for t in range(len(daily_view))]

            # plot daily view series
            axes[fig_idx].plot_date(date_axis, daily_view, 'k-')
            axes[fig_idx].axvline(x=datetime(2015, 10, 23), color=ColorPalette.TOMATO,
                                  linestyle='--', lw=1.5, zorder=30)
            axes[fig_idx].text(0.3, 0.95, video_title_list[fig_idx], size=10,
                               transform=axes[fig_idx].transAxes, ha='center', va='bottom')
            axes[fig_idx].tick_params(axis='both', which='major', labelsize=10)
            axes[fig_idx].yaxis.set_major_formatter(FuncFormatter(concise_fmt))
            axes[fig_idx].xaxis.set_major_formatter(mdates.DateFormatter("'%y"))
            fig_idx += 1

    axes[2].set_ylabel('daily views', fontsize=11)
    axes[0].set_title('(b)', fontsize=12)

    hide_spines(axes)

    timer.stop()

    plt.tight_layout()
    plt.savefig('../images/intro_teaser.pdf', bbox_inches='tight')
    plt.show()
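# --- Hedged sketches of three repo helpers used throughout these scripts but
# not shown in this snapshot. These are minimal implementations consistent
# with the call sites above; the repo's actual versions may differ. ---
import time

import numpy as np


class Timer:
    """Assumed wall-clock timer: start() records a timestamp, stop() prints elapsed seconds."""

    def start(self):
        self._t0 = time.time()

    def stop(self):
        print('elapsed time: {0:.2f}s'.format(time.time() - self._t0))


def concise_fmt(x, pos=None):
    """Assumed matplotlib FuncFormatter callback: 1200 -> '1K', 3.4e6 -> '3M'."""
    if abs(x) >= 1e9:
        return '{0:.0f}B'.format(x / 1e9)
    if abs(x) >= 1e6:
        return '{0:.0f}M'.format(x / 1e6)
    if abs(x) >= 1e3:
        return '{0:.0f}K'.format(x / 1e3)
    return '{0:.0f}'.format(x)


def hide_spines(axes):
    """Assumed style helper: drop the top/right spines on one axes or a collection."""
    if hasattr(axes, 'spines'):
        axes = [axes]
    for ax in np.asarray(axes).ravel():
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)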
def main():
    # == == == == == == Part 1: Set up environment == == == == == == #
    timer = Timer()
    timer.start()

    data_prefix = '../data/recsys'

    num_relevant_by_rank = np.zeros((NUM_REL,))
    num_recommended_by_rank = np.zeros((NUM_REC,))
    # aggregate by rank1, rank2-5, rank6-10, rank11-15
    dense_relevant_in_recommended_mat = np.zeros((NUM_REL, 4))
    # aggregate by rank1, rank2-5, rank6-10, rank11-15, rank16-30, rank31-50
    dense_recommended_from_relevant_mat = np.zeros((NUM_REC, 6))
    relevant_in_recommended_arr = np.zeros((NUM_REL,))
    recommended_from_relevant_arr = np.zeros((NUM_REC,))

    # == == == == == == Part 2: Load both relevant list and recommended list == == == == == == #
    for subdir, _, files in os.walk(data_prefix):
        for f in files:
            with open(os.path.join(subdir, f), 'r') as fin:
                for line in fin:
                    network_json = json.loads(line.rstrip())
                    recommended_list = network_json['recommended_list'][: NUM_REC]
                    relevant_list = network_json['relevant_list'][: NUM_REL]

                    num_relevant_by_rank += np.pad(np.ones(len(relevant_list)),
                                                   (0, NUM_REL - len(relevant_list)), 'constant')
                    num_recommended_by_rank += np.pad(np.ones(len(recommended_list)),
                                                      (0, NUM_REC - len(recommended_list)), 'constant')

                    for rel_rank, vid in enumerate(relevant_list):
                        if vid in recommended_list:
                            relevant_in_recommended_arr[rel_rank] += 1
                            position_on_recommended = recommended_list.index(vid)
                            dense_relevant_in_recommended_mat[rel_rank, switch(position_on_recommended)] += 1

                    for rec_rank, vid in enumerate(recommended_list):
                        if vid in relevant_list:
                            recommended_from_relevant_arr[rec_rank] += 1
                            position_on_relevant = relevant_list.index(vid)
                            dense_recommended_from_relevant_mat[rec_rank, switch(position_on_relevant)] += 1

    # == == == == == == Part 3: Plot probabilities in each position == == == == == == #
    fig, axes = plt.subplots(1, 2, figsize=(12, 4))
    axes = axes.ravel()
    color_cycle_6 = ColorPalette.CC6

    stackedBarPlot(ax=axes[0],
                   data=dense_relevant_in_recommended_mat / num_relevant_by_rank.reshape(-1, 1),
                   cols=color_cycle_6,
                   edgeCols=['#000000'] * 4,
                   xlabel='position $x$ on relevant list',
                   ylabel='prob. of displaying on recommended list',
                   scale=False,
                   endGaps=True)
    axes[0].legend([plt.Rectangle((0, 0), 1, 1, fc=color_cycle_6[x], alpha=0.8, ec='k') for x in range(4)],
                   ['position 1', 'position 2-5', 'position 6-10', 'position 11-15'],
                   fontsize=10, frameon=False, loc='upper right',
                   fancybox=False, shadow=True, ncol=1)
    axes[0].set_title('(a)', fontsize=12)

    stackedBarPlot(ax=axes[1],
                   data=dense_recommended_from_relevant_mat / num_recommended_by_rank.reshape(-1, 1),
                   cols=ColorPalette.CC6,
                   edgeCols=['#000000'] * 6,
                   xlabel='position $x$ on recommended list',
                   ylabel='prob. of originating from relevant list',
                   scale=False,
                   endGaps=True)
    axes[1].legend([plt.Rectangle((0, 0), 1, 1, fc=color_cycle_6[x], alpha=0.8, ec='k') for x in range(6)],
                   ['position 1', 'position 2-5', 'position 6-10',
                    'position 11-15', 'position 16-30', 'position 31-50'],
                   fontsize=10, frameon=False, loc='upper right',
                   fancybox=False, shadow=True, ncol=2)
    axes[1].set_title('(b)', fontsize=12)

    for ax in axes:
        ax.set_ylim(top=1)
        ax.set_ylim(bottom=0)

    hide_spines(axes)

    timer.stop()

    plt.tight_layout()
    plt.savefig('../images/data_rel2rec.pdf', bbox_inches='tight')
    if not platform.system() == 'Linux':
        plt.show()
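# Hedged sketch of the `switch` rank-bucketing helper used above. The bucket
# boundaries are inferred from the matrix-shape comments (rank 1, 2-5, 6-10,
# 11-15, 16-30, 31-50); the repo's actual implementation may differ.
def switch(pos):
    """Map a 0-based list position to its aggregation bucket index."""
    if pos == 0:      # rank 1
        return 0
    elif pos < 5:     # rank 2-5
        return 1
    elif pos < 10:    # rank 6-10
        return 2
    elif pos < 15:    # rank 11-15
        return 3
    elif pos < 30:    # rank 16-30
        return 4
    else:             # rank 31-50
        return 5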
def main():
    timer = Timer()
    timer.start()

    cornflower_blue = ColorPalette.BLUE
    tomato = ColorPalette.TOMATO
    color_cycle_4 = ColorPalette.CC4
    label_fs = ColorPalette.LABELFS
    title_fs = ColorPalette.TITLEFS
    tick_style = ColorPalette.TICKSTYLE
    bar_text_style = ColorPalette.BARTEXTSTYLE

    data_loader = DataLoader()
    data_loader.load_embed_content_dict()
    embed_cid_dict = data_loader.embed_cid_dict
    embed_genre_dict = data_loader.embed_genre_dict

    fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(12, 4))
    gs = axes[0, 0].get_gridspec()
    for ax in axes[:, 0]:
        ax.remove()
    ax_left = fig.add_subplot(gs[:, 0])
    for ax in axes[:, 1]:
        ax.remove()
    ax_mid = fig.add_subplot(gs[:, 1])
    axes = [ax_left, ax_mid, axes[0, 2], axes[1, 2]]

    # == == == == == == Part 1: Plot the probability of forming a persistent link == == == == == == #
    p_form_list = []
    p_persistent_list = []
    with open('./justify_persistent_link.log', 'r') as fin:
        for line in fin:
            _, p_form, _, p_persistent = re.split(',|:', line)
            p_form = float(p_form.strip())
            p_persistent = float(p_persistent.strip())
            p_form_list.append(p_form)
            p_persistent_list.append(p_persistent)

    axes[0].plot(p_form_list, p_persistent_list, color=cornflower_blue)
    for p_form in [0.5, 0.7, 0.8, 0.9]:
        p_persistent = p_persistent_list[int(p_form * 100)]
        axes[0].scatter(p_form, p_persistent, s=15, c=tomato, edgecolors='k', zorder=30)
        axes[0].text(p_form - 0.01, p_persistent, '({0:.2f}, {1:.2f})'.format(p_form, p_persistent),
                     ha='right', va='bottom')
    axes[0].set_xlabel('prob. of forming a link', fontsize=label_fs)
    axes[0].set_ylabel('prob. of being persistent link', fontsize=label_fs)
    axes[0].tick_params(**tick_style)
    axes[0].set_title('(a)', fontsize=title_fs)

    # == == == == == == Part 2: Plot the portion of persistent links that pass the statistical test == == == == == == #
    log_files_list = ['./random_pearsonr.log', './ephemeral_pearsonr.log',
                      './persistent_pearsonr.log', './reciprocal_pearsonr.log']
    link_cnt_list = []
    sign_ratio_list = []
    same_artist_list = []
    sign_ratio_same_artist_list = []
    same_genre_list = []
    sign_ratio_same_genre_list = []

    for log_file in log_files_list:
        cnt = 0
        same_artist_cnt = 0
        same_genre_cnt = 0
        sign_cnt = 0
        sign_cnt_same_artist = 0
        sign_cnt_same_genre = 0
        with open(log_file, 'r') as fin:
            for line in fin:
                src_embed, tar_embed, r, p = line.rstrip().split(',')
                src_embed = int(src_embed)
                tar_embed = int(tar_embed)
                r = float(r)
                p = float(p)
                if p < 0.05:
                    sign_cnt += 1
                cnt += 1
                if embed_cid_dict[src_embed] == embed_cid_dict[tar_embed]:
                    same_artist_cnt += 1
                    if p < 0.05:
                        sign_cnt_same_artist += 1
                if is_same_genre(embed_genre_dict[src_embed], embed_genre_dict[tar_embed]):
                    same_genre_cnt += 1
                    if p < 0.05:
                        sign_cnt_same_genre += 1

        sign_ratio_list.append(sign_cnt / cnt)
        same_artist_list.append(same_artist_cnt / cnt)
        sign_ratio_same_artist_list.append(sign_cnt_same_artist / cnt)
        same_genre_list.append(same_genre_cnt / cnt)
        sign_ratio_same_genre_list.append(sign_cnt_same_genre / cnt)
        link_cnt_list.append(cnt)
        print('#links: {0}, #sign links: {1}, #sign same artist: {2}, #sign same genre: {3}'
              .format(cnt, sign_cnt, sign_cnt_same_artist, sign_cnt_same_genre))

    ind = np.arange(len(log_files_list))
    axes[1].bar(ind, sign_ratio_list, 0.6, edgecolor=['k'] * 4, color=color_cycle_4, lw=1.5, alpha=0.6)
    axes[1].set_ylim([0, axes[0].get_ylim()[1]])
    axes[1].set_ylabel('percentage of links with p<0.05', fontsize=label_fs)
    axes[1].set_xticklabels(('',
                             'random' + r'$^{}$' + '\n({0:,})'.format(link_cnt_list[0]),
                             'ephemeral' + r'$^{}$' + '\n({0:,})'.format(link_cnt_list[1]),
                             'persistent' + r'$^{-}$' + '\n({0:,})'.format(link_cnt_list[2]),
                             'reciprocal' + r'$^{}$' + '\n({0:,})'.format(link_cnt_list[3])))
    for tick in ind:
        axes[1].text(tick, sign_ratio_list[tick] + 0.01, '{0:.3f}'.format(sign_ratio_list[tick]),
                     **bar_text_style)
    axes[1].tick_params(**tick_style)
    axes[1].set_title('(b)', fontsize=title_fs)

    # == == == == == == Part 3: Plot the percentage of significant persistent links that belong to the same artist or share a genre == == == == == == #
    axes[2].bar(ind, np.array(same_artist_list) - np.array(sign_ratio_same_artist_list), 0.6,
                bottom=sign_ratio_same_artist_list, edgecolor=color_cycle_4,
                color=['w'] * 4, hatch='//', lw=1.5, alpha=0.6)
    axes[2].bar(ind, sign_ratio_same_artist_list, 0.6,
                edgecolor=['k'] * 4, color=color_cycle_4, lw=1.5, alpha=0.6)
    axes[2].set_ylim([0, axes[0].get_ylim()[1]])
    axes[2].set_ylabel('same artist', fontsize=label_fs)
    axes[2].text(0, same_artist_list[0] + 0.01, '{0:.3f}'.format(same_artist_list[0]), **bar_text_style)
    for tick in ind[1:]:
        axes[2].text(tick, same_artist_list[tick] + 0.01,
                     '{0:.3f}'.format(same_artist_list[tick]), **bar_text_style)
        axes[2].text(tick, sign_ratio_same_artist_list[tick] + 0.01,
                     '{0:.3f}'.format(sign_ratio_same_artist_list[tick]), **bar_text_style)
    axes[2].tick_params(**tick_style)
    axes[2].get_xaxis().set_visible(False)
    axes[2].set_title('(c)', fontsize=title_fs)

    axes[3].bar(ind, np.array(same_genre_list) - np.array(sign_ratio_same_genre_list), 0.6,
                bottom=sign_ratio_same_genre_list, edgecolor=color_cycle_4,
                color=['w'] * 4, hatch='//', lw=1.5, alpha=0.6)
    axes[3].bar(ind, sign_ratio_same_genre_list, 0.6,
                edgecolor=['k'] * 4, color=color_cycle_4, lw=1.5, alpha=0.6)
    axes[3].set_ylim([0, axes[0].get_ylim()[1]])
    axes[3].set_ylabel('same genre', fontsize=label_fs)
    for tick in ind:
        axes[3].text(tick, same_genre_list[tick] + 0.01,
                     '{0:.3f}'.format(same_genre_list[tick]), **bar_text_style)
        axes[3].text(tick, sign_ratio_same_genre_list[tick] + 0.01,
                     '{0:.3f}'.format(sign_ratio_same_genre_list[tick]), **bar_text_style)
    axes[3].tick_params(**tick_style)
    axes[3].set_xticklabels(('',
                             'random' + r'$^{}$' + '\n({0:,})'.format(link_cnt_list[0]),
                             'ephemeral' + r'$^{}$' + '\n({0:,})'.format(link_cnt_list[1]),
                             'persistent' + r'$^{-}$' + '\n({0:,})'.format(link_cnt_list[2]),
                             'reciprocal' + r'$^{}$' + '\n({0:,})'.format(link_cnt_list[3])))

    hide_spines(axes)

    timer.stop()

    plt.tight_layout()
    plt.savefig('../images/model_persistent_links.pdf', bbox_inches='tight')
    if not platform.system() == 'Linux':
        plt.show()
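# Hedged sketch of `is_same_genre`, which the script above uses to test whether
# two videos share at least one genre label. Assumes genre annotations are
# iterables of strings; the repo's actual version may differ.
def is_same_genre(genres_a, genres_b):
    """Return True iff the two genre collections overlap."""
    return len(set(genres_a) & set(genres_b)) > 0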
def main():
    timer = Timer()
    timer.start()

    cornflower_blue = ColorPalette.BLUE
    tomato = ColorPalette.TOMATO
    color_cycle_4 = ColorPalette.CC4
    label_fs = ColorPalette.LABELFS
    title_fs = ColorPalette.TITLEFS
    tick_style = ColorPalette.TICKSTYLE
    bar_text_style = ColorPalette.BARTEXTSTYLE

    data_loader = DataLoader()
    data_loader.load_video_views()
    embed_view_dict = data_loader.embed_view_dict
    embed_avg_train_view_dict = {embed: np.mean(embed_view_dict[embed][:-NUM_OUTPUT])
                                 for embed in embed_view_dict.keys()}

    net_ratio_list = []
    src_to_tar_view_ratio = []
    link_weights_record = []

    naive_smape_list, snaive_smape_list, ar_smape_list, rnn_smape_list, arnet_smape_list = \
        [[] for _ in range(5)]
    # np.float was removed in NumPy 1.24; use the builtin float
    naive_daily_smape_mat, snaive_daily_smape_mat, ar_daily_smape_mat, rnn_daily_smape_mat, arnet_daily_smape_mat = \
        [np.empty((0, NUM_OUTPUT), float) for _ in range(5)]

    with open('./forecast_tracker_all.json', 'r') as fin:
        for line in fin:
            result_json = json.loads(line.rstrip())
            tar_embed = result_json['embed']
            true_value = result_json['true_value']
            naive_pred = result_json['naive_pred']
            snaive_pred = result_json['snaive_pred']
            ar_pred = result_json['ar_pred']
            rnn_pred = result_json['rnn_pred']
            arnet_pred = result_json['arnet_pred']

            naive_smape, naive_daily_smape_arr = smape(true_value, naive_pred)
            naive_smape_list.append(naive_smape)
            naive_daily_smape_mat = np.vstack((naive_daily_smape_mat, naive_daily_smape_arr))

            snaive_smape, snaive_daily_smape_arr = smape(true_value, snaive_pred)
            snaive_smape_list.append(snaive_smape)
            snaive_daily_smape_mat = np.vstack((snaive_daily_smape_mat, snaive_daily_smape_arr))

            ar_smape, ar_daily_smape_arr = smape(true_value, ar_pred)
            ar_smape_list.append(ar_smape)
            ar_daily_smape_mat = np.vstack((ar_daily_smape_mat, ar_daily_smape_arr))

            rnn_smape, rnn_daily_smape_arr = smape(true_value, rnn_pred)
            rnn_smape_list.append(rnn_smape)
            rnn_daily_smape_mat = np.vstack((rnn_daily_smape_mat, rnn_daily_smape_arr))

            arnet_smape, arnet_daily_smape_arr = smape(true_value, arnet_pred)
            arnet_smape_list.append(arnet_smape)
            arnet_daily_smape_mat = np.vstack((arnet_daily_smape_mat, arnet_daily_smape_arr))

            # analyse network contribution
            arnet_net_ratio = result_json['net_ratio']
            net_ratio_list.append(arnet_net_ratio)

            incoming_embeds = result_json['incoming_embeds']
            link_weights = result_json['link_weights']
            for edge_inx, src_embed in enumerate(incoming_embeds):
                view_ratio = np.log10(embed_avg_train_view_dict[src_embed]
                                      / embed_avg_train_view_dict[tar_embed])
                src_to_tar_view_ratio.append(view_ratio)
                link_weights_record.append(link_weights[edge_inx])

    fig, axes = plt.subplots(ncols=3, nrows=1, figsize=(12, 4))
    axes = axes.ravel()

    # == == == == == == Part 1: Plot performance comparison == == == == == == #
    smape_mat = [naive_smape_list, snaive_smape_list, ar_smape_list, rnn_smape_list, arnet_smape_list]
    axes[0].boxplot(smape_mat, showfliers=False, meanline=True, showmeans=True, widths=0.7)
    means = [np.mean(x) for x in smape_mat]
    for tick, mean in enumerate(means):
        # boxplot positions are 1-based, hence tick + 1
        axes[0].text(tick + 1, mean + 0.3, '{0:.3f}'.format(mean), **bar_text_style)
    axes[0].set_xticklabels(['Naive', 'SN', 'AR', 'RNN', 'ARNet'], fontsize=label_fs)
    axes[0].set_ylabel('SMAPE', fontsize=label_fs)
    axes[0].tick_params(**tick_style)
    axes[0].set_title('(a)', fontsize=title_fs)

    # == == == == == == Part 2: Plot performance as the forecast horizon extends == == == == == == #
    axes[1].plot(np.arange(1, 1 + NUM_OUTPUT), np.mean(naive_daily_smape_mat, axis=0),
                 label='Naive', c='k', mfc='none', marker='D', markersize=4)
    axes[1].plot(np.arange(1, 1 + NUM_OUTPUT), np.mean(snaive_daily_smape_mat, axis=0),
                 label='SN', c=color_cycle_4[0], mfc='none', marker='*', markersize=5)
    axes[1].plot(np.arange(1, 1 + NUM_OUTPUT), np.mean(ar_daily_smape_mat, axis=0),
                 label='AR', c=color_cycle_4[1], mfc='none', marker='s', markersize=5)
    axes[1].plot(np.arange(1, 1 + NUM_OUTPUT), np.mean(rnn_daily_smape_mat, axis=0),
                 label='RNN', c=color_cycle_4[2], mfc='none', marker='^', markersize=5)
    axes[1].plot(np.arange(1, 1 + NUM_OUTPUT), np.mean(arnet_daily_smape_mat, axis=0),
                 label='ARNet', c=color_cycle_4[3], marker='o', markersize=5)
    axes[1].set_xlabel('forecast horizon', fontsize=label_fs)
    axes[1].set_ylabel('SMAPE', fontsize=label_fs)
    axes[1].set_ylim([6, 23])
    axes[1].tick_params(**tick_style)
    axes[1].legend(frameon=False)
    axes[1].set_title('(b)', fontsize=title_fs)

    # == == == == == == Part 3: Plot link strength vs. view ratio from src to tar == == == == == == #
    bin_axis = np.arange(-2, 1.9, 0.1)
    bin_records = [[] for _ in range(len(bin_axis))]
    for x, y in zip(src_to_tar_view_ratio, link_weights_record):
        if -2 <= x < 1.9:  # guard both ends so the bin index stays in range
            bin_records[int(np.floor((x + 2) * 10))].append(y)

    for t in np.arange(5, 50, 5):
        axes[2].fill_between(bin_axis, [np.percentile(x, 50 - t) for x in bin_records],
                             [np.percentile(x, 55 - t) for x in bin_records],
                             facecolor=cornflower_blue, alpha=(100 - 2 * t) / 100, lw=0)
        axes[2].fill_between(bin_axis, [np.percentile(x, 45 + t) for x in bin_records],
                             [np.percentile(x, 50 + t) for x in bin_records],
                             facecolor=cornflower_blue, alpha=(100 - 2 * t) / 100, lw=0)
    for t in [10, 30, 70, 90]:
        # abs() keeps alpha within [0, 1] for the lines above the median (t > 50)
        axes[2].plot(bin_axis, [np.percentile(x, t) for x in bin_records],
                     color=cornflower_blue, alpha=abs(100 - 2 * t) / 100, lw=1, zorder=15)
    median_line = [np.percentile(x, 50) for x in bin_records]
    axes[2].plot(bin_axis, median_line, color='k', alpha=0.5, zorder=20, lw=1.5)
    axes[2].xaxis.set_major_formatter(FuncFormatter(lambda x, _: r'$10^{{{0:.0f}}}$'.format(x)))

    peak1_idx = int(np.argmax(median_line))
    peak2_idx = 10 + int(np.argmax(median_line[10:]))
    peak1 = (bin_axis[peak1_idx], median_line[peak1_idx])
    peak2 = (bin_axis[peak2_idx], median_line[peak2_idx])
    axes[2].scatter(peak1[0], peak1[1], s=15, c=tomato, edgecolors='k', zorder=30)
    axes[2].text(peak1[0] + 0.08, peak1[1] + 0.01, '({0:.2f}, {1:.2f})'.format(10 ** peak1[0], peak1[1]),
                 ha='left', va='center')
    axes[2].scatter(peak2[0], peak2[1], s=15, c=tomato, edgecolors='k', zorder=30)
    axes[2].text(peak2[0], peak2[1] + 0.02, '({0:.2f}, {1:.2f})'.format(10 ** peak2[0], peak2[1]),
                 ha='center', va='bottom')

    axes[2].set_xlim((-2.05, 2.02))
    axes[2].set_ylim((-0.02, 1.01))
    axes[2].set_xlabel('views ratio from video ' + r'$u$' + ' to video ' + r'$v$', fontsize=label_fs)
    axes[2].set_ylabel('estimated link strength ' + r'$\beta_{u, v}$', fontsize=label_fs)
    axes[2].set_title('(c)', fontsize=title_fs)

    hide_spines(axes)

    timer.stop()

    plt.tight_layout()
    plt.savefig('../images/model_prediction_results.pdf', bbox_inches='tight')
    plt.show()
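# Hedged sketch of `smape`. The call sites above expect a tuple of
# (overall SMAPE, per-day SMAPE array), and the axis limits (6-23) suggest the
# conventional 0-200 scale; the repo's actual version may differ.
import numpy as np


def smape(true_value, pred_value):
    """Symmetric mean absolute percentage error on the 0-200 scale."""
    y = np.asarray(true_value, dtype=float)
    y_hat = np.asarray(pred_value, dtype=float)
    denom = np.abs(y) + np.abs(y_hat)
    # convention: a day where both series are zero contributes zero error
    daily = np.where(denom == 0, 0, 200 * np.abs(y - y_hat) / np.where(denom == 0, 1, denom))
    return np.mean(daily), daily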
def main():
    timer = Timer()
    timer.start()

    cornflower_blue = ColorPalette.BLUE
    tomato = ColorPalette.TOMATO
    color_cycle_4 = ColorPalette.CC4
    label_fs = ColorPalette.LABELFS
    title_fs = ColorPalette.TITLEFS
    tick_style = ColorPalette.TICKSTYLE

    data_loader = DataLoader()
    data_loader.load_video_views()
    embed_view_dict = data_loader.embed_view_dict
    embed_avg_train_view_dict = {embed: np.mean(embed_view_dict[embed][:-NUM_OUTPUT])
                                 for embed in embed_view_dict.keys()}

    data_loader.load_embed_content_dict()
    embed_cid_dict = data_loader.embed_cid_dict
    embed_genre_dict = data_loader.embed_genre_dict

    cid_artist_dict = {}
    cid_tag_dict = {}
    with open('../data/artist_details.json', 'r') as fin:
        for line in fin:
            artist_json = json.loads(line.rstrip())
            cid_artist_dict[artist_json['channel_id']] = artist_json['artist_name']
            cid_tag_dict[artist_json['channel_id']] = artist_json['tag-dict']

    cid_views_dict = defaultdict(int)
    cid_views_wo_network_dict = defaultdict(int)

    arnet_smape_list = []
    net_ratio_list = []
    same_artist_net_ratio_list = []
    same_genre_net_ratio_list = []
    total_views = 0
    network_explained_views = 0

    with open('./embed_prediction.json', 'r') as fin:
        for line in fin:
            result_json = json.loads(line.rstrip())
            tar_embed = result_json['embed']
            avg_train_views = embed_avg_train_view_dict[tar_embed]
            true_value = result_json['true_value']
            arnet_pred = result_json['arnet_pred']
            arnet_smape_list.append(smape(true_value, arnet_pred)[0])

            incoming_embeds = result_json['incoming_embeds']
            link_weights = result_json['link_weights']
            same_artist_contributed_views = 0
            same_genre_contributed_views = 0
            for edge_inx, src_embed in enumerate(incoming_embeds):
                if embed_cid_dict[tar_embed] == embed_cid_dict[src_embed]:
                    same_artist_contributed_views += link_weights[edge_inx] * embed_avg_train_view_dict[src_embed]
                if is_same_genre(embed_genre_dict[tar_embed], embed_genre_dict[src_embed]):
                    same_genre_contributed_views += link_weights[edge_inx] * embed_avg_train_view_dict[src_embed]

            # analyse network contribution
            arnet_net_ratio = result_json['net_ratio']
            net_ratio_list.append(arnet_net_ratio)
            # rounding issues can make the ratio slightly larger than 1, so cap it
            same_artist_net_ratio_list.append(min(same_artist_contributed_views / avg_train_views, 1))
            same_genre_net_ratio_list.append(min(same_genre_contributed_views / avg_train_views, 1))

            cid_views_dict[embed_cid_dict[tar_embed]] += avg_train_views
            cid_views_wo_network_dict[embed_cid_dict[tar_embed]] += avg_train_views * (1 - arnet_net_ratio)

            total_views += avg_train_views
            network_explained_views += avg_train_views * arnet_net_ratio

    print('\nFor an average video in our dataset, we estimate {0:.1f}% of the views come from the network.'
          .format(100 * np.mean(net_ratio_list)))
    print('In particular, {0:.1f}% ({1:.1f}%) of the views come from the same artist.'
          .format(100 * np.mean(same_artist_net_ratio_list),
                  100 * np.mean(same_artist_net_ratio_list) / np.mean(net_ratio_list)))
    print('In total, our model estimates that the recommendation network contributes {0:.1f}% of popularity in the Vevo network.'
          .format(100 * network_explained_views / total_views))
    print('total views for 13K: {0:.1f}M'.format(total_views / 1000000))
    print('explained views for 13K: {0:.1f}M'.format(network_explained_views / 1000000))
    print('total views for 60K: {0:.1f}M'.format(np.sum(list(embed_avg_train_view_dict.values())) / 1000000))
    print('Gini coef with network: {0:.4f}'.format(gini(list(cid_views_dict.values()))))
    print('Gini coef without network: {0:.4f}\n'.format(gini(list(cid_views_wo_network_dict.values()))))

    fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(12, 4.2))
    gs = axes[0, 0].get_gridspec()
    for ax in axes[:, 1]:
        ax.remove()
    ax_mid = fig.add_subplot(gs[:, 1])
    for ax in axes[:, 2]:
        ax.remove()
    ax_right = fig.add_subplot(gs[:, 2])
    axes = [axes[0, 0], axes[1, 0], ax_mid, ax_right]

    # == == == == == == Part 1: Plot SMAPE vs. traffic composition == == == == == == #
    num_bin = 10
    sorted_same_artist_tuple_list = sorted([(x, y) for x, y in zip(same_artist_net_ratio_list, arnet_smape_list)],
                                           key=lambda x: x[0])
    same_artist_split_values = [np.percentile(same_artist_net_ratio_list, x) for x in np.arange(10, 101, 10)]
    same_artist_bins = [[] for _ in range(num_bin)]
    for same_artist_net_ratio, arnet_smape in sorted_same_artist_tuple_list:
        slice_idx = int(np.floor(percentileofscore(same_artist_net_ratio_list, same_artist_net_ratio) / 10))
        if slice_idx >= num_bin:
            slice_idx = num_bin - 1
        same_artist_bins[slice_idx].append(arnet_smape)

    sorted_same_genre_tuple_list = sorted([(x, y) for x, y in zip(same_genre_net_ratio_list, arnet_smape_list)],
                                          key=lambda x: x[0])
    same_genre_split_values = [np.percentile(same_genre_net_ratio_list, x) for x in np.arange(10, 101, 10)]
    same_genre_bins = [[] for _ in range(num_bin)]
    for same_genre_net_ratio, arnet_smape in sorted_same_genre_tuple_list:
        slice_idx = int(np.floor(percentileofscore(same_genre_net_ratio_list, same_genre_net_ratio) / 10))
        if slice_idx >= num_bin:
            slice_idx = num_bin - 1
        same_genre_bins[slice_idx].append(arnet_smape)

    axes[0].plot(range(1, 11, 1), [np.mean(x) for x in same_artist_bins], color=cornflower_blue,
                 label='same artist', mfc='none', marker='o', markersize=4)
    axes[1].plot(range(1, 11, 1), [np.mean(x) for x in same_genre_bins], color=tomato,
                 label='same genre', mfc='none', marker='o', markersize=4)
    for ax in [axes[0], axes[1]]:
        ax.set_xlim([0.5, 10.5])
        ax.set_ylim([7, 10.5])
        ax.set_ylabel('SMAPE', fontsize=label_fs)
        ax.xaxis.set_ticks(np.arange(1, 10, 2))
        ax.tick_params(**tick_style)
        ax.legend(frameon=False)
    axes[0].xaxis.set_major_formatter(
        FuncFormatter(lambda x, _: '({0:.3f})'.format(same_artist_split_values[int(x) - 1])))
    axes[1].xaxis.set_major_formatter(
        FuncFormatter(lambda x, _: '{0:.0f}%\n({1:.3f})'.format(10 * x, same_genre_split_values[int(x) - 1])))
    # axes[0].xaxis.set_major_formatter(
    #     FuncFormatter(lambda x, _: '({0:.3f})'.format(10 * x)))
    # axes[1].xaxis.set_major_formatter(
    #     FuncFormatter(lambda x, _: '{0:.0f}%\n({1:.3f})'.format(10 * x, 10 * x)))
    axes[1].set_xlabel(r'$\eta_v$ percentile', fontsize=label_fs)
    axes[0].set_title('(a)', fontsize=title_fs)

    # == == == == == == Part 2: Plot who can utilize the network better == == == == == == #
    artist_views_list = list(cid_views_dict.values())
    wo_network_artist_views_list = list(cid_views_wo_network_dict.values())
    cid_list = sorted(cid_views_dict.keys())
    artist_true_percentile = [percentileofscore(artist_views_list, cid_views_dict[cid]) for cid in cid_list]
    wo_network_artist_percentile = [percentileofscore(wo_network_artist_views_list, cid_views_wo_network_dict[cid])
                                    for cid in cid_list]
    percentile_change = np.array([artist_true_percentile[i] - wo_network_artist_percentile[i]
                                  for i in range(len(cid_list))])

    num_popularity_loss = sum(percentile_change < 0)
    num_popularity_equal = sum(percentile_change == 0)
    num_popularity_gain = sum(percentile_change > 0)
    print('{0} ({1:.2f}%) artists lose popularity with network'.format(
        num_popularity_loss, num_popularity_loss / len(cid_list) * 100))
    print('{0} ({1:.2f}%) artists with no popularity change'.format(
        num_popularity_equal, num_popularity_equal / len(cid_list) * 100))
    print('{0} ({1:.2f}%) artists gain popularity with network\n'.format(
        num_popularity_gain, num_popularity_gain / len(cid_list) * 100))

    artist_percentile_mat = [[] for _ in range(10)]
    artist_cid_mat = [[] for _ in range(10)]
    for idx, percentile_value in enumerate(wo_network_artist_percentile):
        bin_idx = min(int(np.floor(percentile_value / 10)), 9)
        artist_percentile_mat[bin_idx].append(artist_true_percentile[idx] - percentile_value)
        artist_cid_mat[bin_idx].append(cid_list[idx])

    red_circle = dict(markerfacecolor=tomato, marker='o', markersize=4)
    axes[2].boxplot(artist_percentile_mat, showfliers=True, widths=0.5, flierprops=red_circle)
    axes[2].axhline(y=0, color=cornflower_blue, linestyle='--', lw=1, zorder=0)
    axes[2].set_xlabel('artist popularity percentile without network', fontsize=label_fs)
    axes[2].set_ylabel('percentile change with network', fontsize=label_fs)
    axes[2].tick_params(**tick_style)
    axes[2].set_xticks(axes[2].get_xticks()[::2])
    axes[2].xaxis.set_major_formatter(FuncFormatter(lambda x, _: '{0:.0f}%'.format(10 * x)))
    axes[2].yaxis.set_major_formatter(FuncFormatter(lambda x, _: '{0:.0f}%'.format(x)))
    axes[2].set_title('(b)', fontsize=title_fs)

    # find outliers
    whis = 1.5
    top_outliers_list = []
    bottom_outliers_list = []
    for box_idx, box in enumerate(artist_percentile_mat):
        q1 = np.percentile(box, 25)
        q3 = np.percentile(box, 75)
        iq = q3 - q1
        hi_val = q3 + whis * iq
        lo_val = q1 - whis * iq
        for idx, val in enumerate(box):
            if val > hi_val:
                top_outliers_list.append((artist_cid_mat[box_idx][idx], val))
            elif val < lo_val:
                bottom_outliers_list.append((artist_cid_mat[box_idx][idx], val))

    sorted_top_outliers_list = sorted(
        [(cid_artist_dict[x[0]], cid_tag_dict[x[0]], int(cid_views_dict[x[0]]), x[1]) for x in top_outliers_list],
        key=lambda t: t[2], reverse=True)
    for t in sorted_top_outliers_list:
        print(t)
    print('-------------------')
    sorted_bottom_outliers_list = sorted(
        [(cid_artist_dict[x[0]], cid_tag_dict[x[0]], int(cid_views_dict[x[0]]), x[1]) for x in bottom_outliers_list],
        key=lambda t: t[2], reverse=True)
    for t in sorted_bottom_outliers_list:
        print(t)

    indie_xaxis, indie_yaxis = [], []
    rap_xaxis, rap_yaxis = [], []
    other_xaxis, other_yaxis = [], []
    lose_xaxis, lose_yaxis = [], []
    for top_outlier, _ in top_outliers_list:
        tag_str = ','.join(cid_tag_dict[top_outlier].keys())
        if 'indie' in tag_str or 'alternative' in tag_str or 'new wave' in tag_str:
            indie_xaxis.append(cid_views_dict[top_outlier])
            indie_yaxis.append((cid_views_dict[top_outlier] - cid_views_wo_network_dict[top_outlier])
                               / cid_views_dict[top_outlier])
        elif ('rap' in tag_str or 'hip hop' in tag_str or 'rhythm and blues' in tag_str
              or 'reggae' in tag_str or 'punk' in tag_str or 'funk' in tag_str or 'r&b' in tag_str):
            rap_xaxis.append(cid_views_dict[top_outlier])
            rap_yaxis.append((cid_views_dict[top_outlier] - cid_views_wo_network_dict[top_outlier])
                             / cid_views_dict[top_outlier])
        else:
            other_xaxis.append(cid_views_dict[top_outlier])
            other_yaxis.append((cid_views_dict[top_outlier] - cid_views_wo_network_dict[top_outlier])
                               / cid_views_dict[top_outlier])
    for bottom_outlier, _ in bottom_outliers_list:
        lose_xaxis.append(cid_views_dict[bottom_outlier])
        lose_yaxis.append((cid_views_dict[bottom_outlier] - cid_views_wo_network_dict[bottom_outlier])
                          / cid_views_dict[bottom_outlier])

    axes[3].scatter(indie_xaxis, indie_yaxis, marker='^', facecolors='none', edgecolors=color_cycle_4[0],
                    s=20, label='Indie: {0}'.format(len(indie_xaxis)))
    axes[3].scatter(rap_xaxis, rap_yaxis, marker='o', facecolors='none', edgecolors=color_cycle_4[1],
                    s=20, label='Hip hop: {0}'.format(len(rap_xaxis)))
    axes[3].scatter(other_xaxis, other_yaxis, marker='s', facecolors='none', edgecolors=color_cycle_4[2],
                    s=20, label='Other: {0}'.format(len(other_xaxis)))
    # axes[3].scatter(lose_xaxis, lose_yaxis, marker='x', color=color_cycle_4[3], s=20,
    #                 label='artists lose popularity: {0}'.format(len(lose_xaxis)))
    axes[3].set_ylim((-0.02, 1.02))
    axes[3].set_xscale('log')
    axes[3].set_xlabel('artist average daily views', fontsize=label_fs)
    axes[3].set_ylabel('network contribution ratio ' + r'$\eta_v$', fontsize=label_fs)
    axes[3].tick_params(**tick_style)
    axes[3].legend(frameon=False, loc='lower left')
    axes[3].set_title('(c)', fontsize=title_fs)

    hide_spines(axes)

    timer.stop()

    plt.tight_layout(w_pad=0.2)
    plt.savefig('../images/model_prediction_analysis.pdf', bbox_inches='tight')
    plt.show()
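# Hedged sketch of the `gini` helper used above, following the standard
# sorted-index formulation of the Gini coefficient; the repo's actual version
# may differ.
import numpy as np


def gini(values):
    """Gini coefficient of a collection of non-negative values."""
    arr = np.sort(np.asarray(values, dtype=float))
    n = len(arr)
    index = np.arange(1, n + 1)
    # G = 2 * sum(i * x_(i)) / (n * sum(x)) - (n + 1) / n, with x sorted ascending
    return 2 * np.sum(index * arr) / (n * np.sum(arr)) - (n + 1) / n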
def main():
    # == == == == == == Part 1: Set up environment == == == == == == #
    timer = Timer()
    timer.start()

    data_prefix = '../data/'
    target_day_indices = [0, 15, 30, 45]
    color_cycle_4 = ColorPalette.CC4
    date_labels = ['Sep 01, 2018', 'Sep 16, 2018', 'Oct 01, 2018', 'Oct 16, 2018']

    # == == == == == == Part 2: Load video views == == == == == == #
    data_loader = DataLoader()
    data_loader.load_video_views()
    embed_view_dict = data_loader.embed_view_dict
    embed_avg_view_dict = data_loader.embed_avg_view_dict
    num_videos = data_loader.num_videos

    target_day_view_list = [[], [], [], []]
    for embed in range(num_videos):
        for target_idx, target_day in enumerate(target_day_indices):
            target_day_view_list[target_idx].append(embed_view_dict[embed][target_day])

    # == == == == == == Part 3: Load dynamic network snapshot == == == == == == #
    embed_indegree_dict = {embed: np.zeros((T,)) for embed in np.arange(num_videos)}  # daily indegree for each embed
    zero_indegree_list = []  # percentage of zero indegree for each day
    num_edges_list = []  # number of total edges for each day
    for t in range(T):
        filename = 'network_{0}.p'.format((datetime(2018, 9, 1) + timedelta(days=t)).strftime('%Y-%m-%d'))
        indegree_list = []
        with open(os.path.join(data_prefix, 'network_pickle', filename), 'rb') as fin:
            network_dict = pickle.load(fin)
            # embed_tar: [(embed_src, pos_src, view_src), ...]
            for tar_embed in range(num_videos):
                indegree_value = len([1 for x in network_dict[tar_embed] if x[1] < NUM_REL])
                embed_indegree_dict[tar_embed][t] = indegree_value
                indegree_list.append(indegree_value)
        indegree_counter = Counter(indegree_list)
        zero_indegree_list.append(indegree_counter[0] / num_videos)
        num_edges_list.append(sum(indegree_list))
        print('>>> Finish loading day {0}...'.format(t + 1))
    print('>>> Network structure has been loaded!')
    print('\n>>> Average number of edges: {0:.0f}, max: {1:.0f}, min: {2:.0f}'.format(
        sum(num_edges_list) / len(num_edges_list), max(num_edges_list), min(num_edges_list)))

    fig, axes = plt.subplots(1, 3, figsize=(12, 4.5))
    ax1, ax2, ax3 = axes.ravel()

    # == == == == == == Part 4: Plot ax1 indegree CCDF == == == == == == #
    embed_avg_indegree_dict = defaultdict(float)
    for t in range(T):
        for embed in range(num_videos):
            embed_avg_indegree_dict[embed] += embed_indegree_dict[embed][t] / T

    indegree_ranked_embed_list = [x[0] for x in sorted(embed_avg_indegree_dict.items(),
                                                       key=lambda kv: kv[1], reverse=True)]
    top_20_indegree_embeds = indegree_ranked_embed_list[:20]

    popular_ranked_embed_list = [x[0] for x in sorted(embed_avg_view_dict.items(),
                                                      key=lambda kv: kv[1], reverse=True)]
    top_20_popular_embeds = popular_ranked_embed_list[:20]

    for target_idx, target_day in enumerate(target_day_indices):
        indegree_list = []
        for embed in range(num_videos):
            indegree_list.append(embed_indegree_dict[embed][target_day])
        print('video with 10 indegree has more in-links than {0:.2f}% videos on date {1}'
              .format(percentileofscore(indegree_list, 10), date_labels[target_idx]))
        print('video with 20 indegree has more in-links than {0:.2f}% videos on date {1}'
              .format(percentileofscore(indegree_list, 20), date_labels[target_idx]))
        plot_ccdf(indegree_list, ax=ax1, color=color_cycle_4[target_idx], label=date_labels[target_idx])

    # compute the power-law fit
    powerlaw_fit = Fit(list(embed_avg_indegree_dict.values()))
    infer_alpha = powerlaw_fit.power_law.alpha
    p = powerlaw_fit.power_law.ccdf()
    ins_x_axis = powerlaw_fit.data[:int(0.9 * len(p))]  # the sorted, xmin-truncated data held by the Fit object
    ins_y_axis = 0.1 * p[:int(0.9 * len(p))]

    ax1.plot(ins_x_axis, ins_y_axis, 'k:')
    ax1.text(0.4, 0.6, r'$x^{{{0:.2f}}}$'.format(-infer_alpha + 1), size=12,
             ha='right', va='bottom', transform=ax1.transAxes)

    ax1.set_xscale('log')
    ax1.set_yscale('log')
    ax1.set_xlabel('indegree', fontsize=11)
    ax1.set_ylabel(r'$P(X \geq x)$', fontsize=11)
    ax1.tick_params(axis='both', which='major', labelsize=10)
    ax1.set_title('(a) indegree distribution', fontsize=12)
    ax1.legend(frameon=False, fontsize=11, ncol=1, fancybox=False, shadow=True)

    mean_zero_indegree = sum(zero_indegree_list) / len(zero_indegree_list)
    ax1.axhline(y=1 - mean_zero_indegree, color='k', linestyle='--', zorder=30)
    ax1.text(0.96, 0.9, '{0:.0f}% with 0 indegree'.format(mean_zero_indegree * 100), size=11,
             transform=ax1.transAxes, ha='right', va='top')

    # == == == == == == Part 5: Plot ax2 views distribution == == == == == == #
    for target_idx, views_list in enumerate(target_day_view_list):
        x_values = range(100)
        y_values = [np.percentile(views_list, x) for x in x_values]
        ax2.plot(x_values, y_values, color=color_cycle_4[target_idx], label=date_labels[target_idx])
    ax2.set_yscale('log')
    ax2.set_xlabel('views percentile', fontsize=11)
    ax2.set_ylabel('num of views', fontsize=11)
    ax2.tick_params(axis='both', which='major', labelsize=10)
    ax2.set_title('(b) daily views vs. its percentile', fontsize=12)

    avg_views_list = sorted(list(embed_avg_view_dict.values()), reverse=True)
    gini_coef = gini(avg_views_list)
    print('top 1% videos occupy {0:.2f}% views'.format(
        sum(avg_views_list[:int(0.01 * num_videos)]) / sum(avg_views_list) * 100))
    print('top 10% videos occupy {0:.2f}% views'.format(
        sum(avg_views_list[:int(0.1 * num_videos)]) / sum(avg_views_list) * 100))
    print('Gini coef: {0:.3f}'.format(gini_coef))

    spearman_degree = [embed_avg_indegree_dict[embed] for embed in range(num_videos)]
    spearman_views = [embed_avg_view_dict[embed] for embed in range(num_videos)]
    print('Spearman correlation between views and indegree: {0:.4f}, pvalue: {1:.2f}'
          .format(*spearmanr(spearman_views, spearman_degree)))

    median_views = np.median(avg_views_list)
    top_views_90th = np.percentile(avg_views_list, 90)
    top_views_99th = np.percentile(avg_views_list, 99)
    ax2_xmin = ax2.get_xlim()[0]
    ax2_ymin = ax2.get_ylim()[0]
    ax2.plot((50, 50), (ax2_ymin, median_views), color='k', linestyle='--', zorder=30)
    ax2.plot((ax2_xmin, 50), (median_views, median_views), color='k', linestyle='--', zorder=30)
    ax2.text(0.49, 0.45, 'median views {0:,.0f}'.format(median_views), size=11,
             transform=ax2.transAxes, ha='right', va='bottom')
    ax2.plot((90, 90), (ax2_ymin, top_views_90th), color='k', linestyle='--', zorder=30)
    ax2.plot((ax2_xmin, 90), (top_views_90th, top_views_90th), color='k', linestyle='--', zorder=30)
    ax2.text(0.88, 0.75, '90th views {0:,.0f}'.format(top_views_90th), size=11,
             transform=ax2.transAxes, ha='right', va='bottom')
    ax2.plot((99, 99), (ax2_ymin, top_views_99th), color='k', linestyle='--', zorder=30)
    ax2.plot((ax2_xmin, 99), (top_views_99th, top_views_99th), color='k', linestyle='--', zorder=30)
    ax2.text(0.91, 0.95, '99th views {0:,.0f}'.format(top_views_99th), size=11,
             transform=ax2.transAxes, ha='right', va='bottom')

    # == == == == == == Part 6: Plot ax3 video uploading trend == == == == == == #
    x_axis = range(2009, 2018)
    x_labels = ["'09", "'10", "'11", "'12", "'13", "'14", "'15", "'16", "'17"]
    upload_mat = np.zeros((len(x_axis), 8))
    target_topics = ['Pop_music', 'Rock_music', 'Hip_hop_music', 'Independent_music',
                     'Country_music', 'Electronic_music', 'Soul_music', 'Others']
    topic_labels = ['Pop', 'Rock', 'Hip hop', 'Independent', 'Country', 'Electronic', 'Soul', 'Others']
    color_cycle_8 = ColorPalette.CC8

    data_loader.load_embed_content_dict()
    embed_title_dict = data_loader.embed_title_dict
    embed_uploadtime_dict = data_loader.embed_uploadtime_dict
    embed_genre_dict = data_loader.embed_genre_dict

    for embed in range(num_videos):
        upload_year = int(embed_uploadtime_dict[embed][:4])
        if 2009 <= upload_year <= 2017:
            year_idx = upload_year - 2009
            genres = embed_genre_dict[embed]
            if len(genres) == 0:
                # add one to the "Others" genre
                upload_mat[year_idx, 7] += 1
            else:
                for genre in genres:
                    upload_mat[year_idx, target_topics.index(genre)] += 1 / len(genres)

    print()
    print(['{0}: {1}'.format(topic, int(num))
           for topic, num in zip(target_topics, np.sum(upload_mat, axis=0))])

    stackedBarPlot(ax=ax3, data=upload_mat, cols=color_cycle_8, edgeCols=['#000000'] * 8,
                   xlabel='uploaded year', ylabel='num of videos', scale=False, endGaps=True)
    ax3.tick_params(axis='both', which='major', labelsize=9)
    ax3.set_xticks(np.arange(len(x_axis)))
    ax3.set_xticklabels(x_labels)
    ax3.yaxis.set_major_formatter(FuncFormatter(concise_fmt))
    ax3.legend([plt.Rectangle((0, 0), 1, 1, fc=c, ec='k', alpha=0.6) for c in color_cycle_8],
               topic_labels, fontsize=9, frameon=False, handletextpad=0.2, columnspacing=0.3,
               ncol=4, bbox_to_anchor=(1, -0.12), bbox_transform=ax3.transAxes,
               fancybox=False, shadow=True)
    ax3.set_title('(c) VEVO videos uploading trend', fontsize=12)

    union_top_set = set(top_20_indegree_embeds).union(top_20_popular_embeds)
    print('\n>>> Size of the union set at cutoff 20:', len(union_top_set))
    print('{0:>24} | {1:>17} | {2:>5} | {3:>8} | {4:>6} | {5:>10} | {6:>5}'.format(
        'Video title', 'Artist', 'Age', 'Indegree', '-rank', 'Views', '-rank'))
    for embed in top_20_indegree_embeds:
        print('{0:>24} & {1:>17} & {2:>5} & {3:>8} & {4:>6} & {5:>10} & {6:>5} \\\\'.format(
            embed_title_dict[embed].split(' - ', 1)[1].split('(')[0].split('ft')[0].strip(),
            embed_title_dict[embed].split(' - ', 1)[0].split('&')[0].split(',')[0].strip(),
            '{0:,}'.format((datetime(2018, 11, 2) - str2obj(embed_uploadtime_dict[embed])).days),
            '{0:,}'.format(int(embed_avg_indegree_dict[embed])),
            '{0:,}'.format(top_20_indegree_embeds.index(embed) + 1),
            '{0:,}'.format(int(embed_avg_view_dict[embed])),
            '{0:,}'.format(popular_ranked_embed_list.index(embed) + 1)))

    print('\n{0:>24} | {1:>17} | {2:>5} | {3:>8} | {4:>6} | {5:>10} | {6:>5}'.format(
        'Video title', 'Artist', 'Age', 'Indegree', '-rank', 'Views', '-rank'))
    for embed in top_20_popular_embeds:
        print('{0:>24} & {1:>17} & {2:>5} & {3:>8} & {4:>6} & {5:>10} & {6:>5} \\\\'.format(
            embed_title_dict[embed].split(' - ', 1)[1].split('(')[0].split('ft')[0].strip(),
            embed_title_dict[embed].split(' - ', 1)[0].split('&')[0].split(',')[0].strip(),
            '{0:,}'.format((datetime(2018, 11, 2) - str2obj(embed_uploadtime_dict[embed])).days),
            '{0:,}'.format(int(embed_avg_indegree_dict[embed])),
            '{0:,}'.format(indegree_ranked_embed_list.index(embed) + 1),
            '{0:,}'.format(int(embed_avg_view_dict[embed])),
            '{0:,}'.format(top_20_popular_embeds.index(embed) + 1)))

    hide_spines(axes)

    timer.stop()

    plt.tight_layout()
    plt.savefig('../images/measure_basic_statistics.pdf', bbox_inches='tight')
    if not platform.system() == 'Linux':
        plt.show()
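# Hedged sketch of `plot_ccdf`, which panel (a) above uses to draw the
# complementary CDF of an indegree sample on the given axes; the repo's actual
# version may differ (e.g. it may log-bin the sample first).
import numpy as np


def plot_ccdf(sample, ax, **kwargs):
    """Plot the empirical P(X >= x) of a sample; styling kwargs pass through to ax.plot."""
    sorted_vals = np.sort(np.asarray(sample))
    # fraction of observations greater than or equal to each sorted value
    ccdf = 1.0 - np.arange(len(sorted_vals)) / len(sorted_vals)
    ax.plot(sorted_vals, ccdf, **kwargs)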
def main():
    # == == == == == == Part 1: Set up environment == == == == == == #
    timer = Timer()
    timer.start()

    data_prefix = '../data/'
    year_labels = ["all years", "'09", "'10", "'11", "'12", "'13",
                   "'14", "'15", "'16", "'17", "'18"]
    num_year = len(year_labels) - 1

    # == == == == == == Part 2: Load video views == == == == == == #
    data_loader = DataLoader()
    data_loader.load_video_views()
    data_loader.load_embed_content_dict()
    embed_avg_view_dict = data_loader.embed_avg_view_dict
    embed_uploadtime_dict = data_loader.embed_uploadtime_dict
    num_videos = data_loader.num_videos

    for embed in range(num_videos):
        upload_year = int(embed_uploadtime_dict[embed][:4])
        if upload_year >= 2009:
            year_idx = upload_year - 2009
        else:
            year_idx = 0
        embed_uploadtime_dict[embed] = year_idx

    views_by_years_list = [[] for _ in range(num_year)]
    indegrees_by_years_list = [[] for _ in range(num_year)]

    # == == == == == == Part 3: Load dynamic network snapshot == == == == == == #
    embed_indegree_dict_15 = {embed: np.zeros((T,)) for embed in np.arange(num_videos)}
    for t in range(T):
        filename = 'network_{0}.p'.format(obj2str(datetime(2018, 9, 1) + timedelta(days=t)))
        with open(os.path.join(data_prefix, 'network_pickle', filename), 'rb') as fin:
            network_dict = pickle.load(fin)
            # embed_tar: [(embed_src, pos_src, view_src)]
            for embed in range(num_videos):
                embed_indegree_dict_15[embed][t] = len([1 for x in network_dict[embed] if x[1] < NUM_REL_15])
        print('>>> Finish loading day {0}...'.format(t + 1))
    print('>>> Network structure has been loaded!')

    for embed in range(num_videos):
        views_by_years_list[embed_uploadtime_dict[embed]].append(embed_avg_view_dict[embed])
        indegrees_by_years_list[embed_uploadtime_dict[embed]].append(np.mean(embed_indegree_dict_15[embed]))

    spearman_traces = []
    all_views, all_indegrees = [], []
    for i in range(num_year):
        all_views.extend(views_by_years_list[i])
        all_indegrees.extend(indegrees_by_years_list[i])
    print('\n>>> {0}'.format(year_labels[0]), spearmanr(all_views, all_indegrees))
    spearman_traces.append(spearmanr(all_views, all_indegrees)[0])

    for i in range(num_year):
        spearman_traces.append(spearmanr(views_by_years_list[i], indegrees_by_years_list[i])[0])
        print('>>> {0} year'.format(year_labels[1 + i]),
              spearmanr(views_by_years_list[i], indegrees_by_years_list[i]))

    # == == == == == == Part 4: Plotting script == == == == == == #
    fig, ax1 = plt.subplots(1, 1, figsize=(8, 2))
    tomato = ColorPalette.TOMATO
    blue = ColorPalette.BLUE
    bar1 = ax1.bar(range(num_year + 1), spearman_traces, edgecolor=['k'] * (num_year + 1),
                   color=[tomato] + [blue] * num_year, lw=1)
    for rect in bar1:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2.0, height, '{0:.3f}'.format(height),
                 ha='center', va='bottom')
    ax1.set_xticks(np.arange(11))
    ax1.set_xticklabels(year_labels)
    ax1.set_ylabel(r'spearman $\rho$')

    hide_spines(ax1)

    timer.stop()

    plt.tight_layout()
    plt.savefig('../images/measure_spearmanr.pdf', bbox_inches='tight')
    if not platform.system() == 'Linux':
        plt.show()
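# Hedged sketches of the `obj2str`/`str2obj` date helpers used by these
# scripts. obj2str must produce the same 'YYYY-MM-DD' filenames that sibling
# scripts build with strftime('%Y-%m-%d'); the [:10] slice in str2obj is an
# assumption in case upload timestamps carry a time component.
from datetime import datetime


def obj2str(date_obj):
    """datetime -> 'YYYY-MM-DD' string, matching the network pickle filenames."""
    return date_obj.strftime('%Y-%m-%d')


def str2obj(date_str):
    """'YYYY-MM-DD...' string -> datetime (date part only)."""
    return datetime.strptime(date_str[:10], '%Y-%m-%d')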
def main():
    # == == == == == == Part 1: Set up environment == == == == == == #
    timer = Timer()
    timer.start()

    data_prefix = '../data/'

    # == == == == == == Part 2: Load video views == == == == == == #
    data_loader = DataLoader()
    data_loader.load_video_views()
    num_videos = data_loader.num_videos

    # == == == == == == Part 3: Load dynamic network snapshot == == == == == == #
    embed_indegree_dict = {embed: np.zeros((T,)) for embed in np.arange(num_videos)}
    edge_frequency_dict = defaultdict(int)
    for t in range(T):
        filename = 'network_{0}.p'.format((datetime(2018, 9, 1) + timedelta(days=t)).strftime('%Y-%m-%d'))
        with open(os.path.join(data_prefix, 'network_pickle', filename), 'rb') as fin:
            network_dict = pickle.load(fin)
            # embed_tar: [(embed_src, pos_src, view_src), ...]
            for embed_tar in range(num_videos):
                inlinks = [x for x in network_dict[embed_tar] if x[1] < NUM_REL]
                if len(inlinks) > 0:
                    for embed_src, _, _ in inlinks:
                        edge_frequency_dict['{0}-{1}'.format(embed_src, embed_tar)] += 1
                embed_indegree_dict[embed_tar][t] = len(inlinks)
        print('>>> Finish loading day {0}...'.format(t + 1))
    print('>>> Network structure has been loaded!')

    link_frequency_counter = Counter(edge_frequency_dict.values())

    # == == == == == == Part 4: Plot how indegree changes == == == == == == #
    cornflower_blue = ColorPalette.BLUE
    tomato = ColorPalette.TOMATO

    fig, axes = plt.subplots(1, 2, figsize=(12, 4.1))
    ax1, ax2 = axes.ravel()

    indegree_change_dict = defaultdict(list)
    for embed in range(num_videos):
        for t in range(T - 1):
            x0 = embed_indegree_dict[embed][t]
            x1 = embed_indegree_dict[embed][t + 1]
            if x0 >= 10:
                indegree_change_dict[x0].append((x1 - x0) / x0)

    x_axis = sorted([x for x in indegree_change_dict.keys() if len(indegree_change_dict[x]) >= 100])
    for i in np.arange(5, 50, 5):
        ax1.fill_between(x_axis, [smoothing(indegree_change_dict, x, 50 - i) for x in x_axis],
                         [smoothing(indegree_change_dict, x, 55 - i) for x in x_axis],
                         facecolor=cornflower_blue, alpha=(100 - 2 * i) / 100, lw=0)
        ax1.fill_between(x_axis, [smoothing(indegree_change_dict, x, 45 + i) for x in x_axis],
                         [smoothing(indegree_change_dict, x, 50 + i) for x in x_axis],
                         facecolor=cornflower_blue, alpha=(100 - 2 * i) / 100, lw=0)
    for i in [25, 75]:
        ax1.plot(x_axis, [smoothing(indegree_change_dict, x, i) for x in x_axis],
                 color=cornflower_blue, alpha=0.8, zorder=15)
    ax1.plot(x_axis, [smoothing(indegree_change_dict, x, 50) for x in x_axis],
             color=cornflower_blue, alpha=1, zorder=15)
    ax1.set_ylim([-0.9, 0.9])
    ax1.set_xlabel('indegree', fontsize=12)
    ax1.set_ylabel('indegree change ratio the next day', fontsize=12)
    ax1.set_title('(a)', fontsize=12)
    ax1.tick_params(axis='both', which='major', labelsize=10)
    plot_contour(indegree_change_dict, target_x=100, ax=ax1)

    x_axis = range(1, 1 + T)
    y_axis = [link_frequency_counter[x] for x in x_axis]
    print('\nephemeral links of frequency 1, {0}, {1:.2f}%'.format(y_axis[0], y_axis[0] / sum(y_axis) * 100))
    print('persistent links of frequency 63, {0}, {1:.2f}%'.format(y_axis[-1], y_axis[-1] / sum(y_axis) * 100))

    ax2.plot(x_axis, y_axis, 'o-', c=tomato, mfc='none', mec=tomato, ms=4)
    ax2.set_xlabel('link frequency', fontsize=12)
    ax2.set_ylabel('num of video-to-video pairs', fontsize=12)
    ax2.tick_params(axis='both', which='major', labelsize=10)
    ax2.set_title('(b)', fontsize=12)
    ax2.annotate('ephemeral links', fontsize=12, xy=(3, 350000), xycoords='data',
                 xytext=(17, 350000), textcoords='data',
                 arrowprops=dict(arrowstyle='->', connectionstyle='arc3'))
    ax2.annotate('frequent links', fontsize=12, xy=(61, 35000), xycoords='data',
                 xytext=(35, 55000), textcoords='data',
                 arrowprops=dict(arrowstyle='->', connectionstyle='arc3'))
    ax2.yaxis.set_major_formatter(FuncFormatter(concise_fmt))

    hide_spines(axes)

    timer.stop()

    plt.tight_layout()
    plt.savefig('../images/measure_temporal_micro.pdf', bbox_inches='tight')
    if not platform.system() == 'Linux':
        plt.show()
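# Hedged sketch of the `smoothing` helper used in panel (a) above. The call
# pattern smoothing(dict, x, q) suggests the q-th percentile of change ratios
# pooled around indegree x; the +/-2 pooling window below is a guess, and the
# repo's actual version may smooth differently.
import numpy as np


def smoothing(change_dict, x, q, window=2):
    """q-th percentile of change ratios pooled from indegrees within +/-window of x."""
    pooled = [v for key, vals in change_dict.items() if abs(key - x) <= window for v in vals]
    return np.percentile(pooled, q)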