Example #1
# Shared imports assumed by the snippets below; 'ld' is the project's own
# data-loading helper module, and its import name here is a guess.
import numpy as np
import pandas as pd
import load_data as ld
def dataset(type, polarity_zone, subjectivity_zone):
    if type == 'old':
        # Load the text dataset, drop non-text posts (null text features), assign UserId
        all_df = ld.all_post()
        all_df = all_df[['postId', 'LikesCount', 'SharesCount', 'CommentsCount', 'PostTextLength', 'PostTextPolarity', 'PostTextSubjectivity']].dropna()
        all_df['UserId'] = all_df['postId'].str.split('_').str.get(0).astype(int)
        all_df['zone'] = 'a'

        all_df = threshold_zones(all_df, polarity_zone, subjectivity_zone)

        zone_dummies = pd.get_dummies(all_df['zone'])
        column_zone_dummies = zone_dummies.columns.tolist()
        dict_aggr = {x: np.sum for x in column_zone_dummies}
        all_df = all_df.join(zone_dummies)
        aggr_df = all_df.groupby(['UserId'], sort=True).agg(dict_aggr).reset_index()

        # Load the gold-standard file
        gs_df = pd.read_csv('data/userlevel_all_features_1007.csv', header=0)

        # Merge the zone features with the gold standard
        aggr_df = pd.merge(aggr_df, gs_df, how='inner', left_on='UserId', right_on='UserNum')
        aggr_df = aggr_df[['UserId', 'ActiveInterests']+column_zone_dummies]

        # should be merged first, then turn ActiveInterests into dummies
    elif type == 'new':
        # Load the new dataset, drop non-text posts (null text features), assign UserId
        ds_file_array = ['data/english_foodgroup_new.json', 'data/english_TEDtranslate_new.json',
                         'data/english_traveladdiction_new.json']
        all_df = ld.new_dataset(ds_file_array)
        # print all_df.dtypes
        all_df = all_df[['UserID', 'LikesCount', 'SharesCount', 'CommentsCount', 'PostTextLength', 'PostTextPolarity', 'PostTextSubjectivity', 'ActiveInterests']].dropna()
        all_df['UserId'] = all_df['UserID']
        all_df['zone'] = 'a'
        all_df = threshold_zones(all_df, polarity_zone, subjectivity_zone)

        zone_dummies = pd.get_dummies(all_df['zone'])
        column_zone_dummies = zone_dummies.columns.tolist()
        dict_aggr = {x: np.sum for x in column_zone_dummies}
        dict_aggr.update({'ActiveInterests': np.min})
        all_df = all_df.join(zone_dummies)
        aggr_df = all_df.groupby(['UserId'], sort=True).agg(dict_aggr).reset_index()
    """
    target_dummies = pd.get_dummies(aggr_df['ActiveInterests'])
    aggr_df = aggr_df.join(target_dummies)
    target = sorted(list(set(target_dummies.columns.tolist()) - set(['Random'])))
    """

    return aggr_df, column_zone_dummies #, target
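
Both branches call threshold_zones, which is defined elsewhere in the project. A minimal sketch of what it plausibly does, assuming the 'po'/'ps'/'nto'-style labels seen in Example #3 below and treating polarity_zone and subjectivity_zone as cutoff values; the labels beyond those three and the exact boundaries are assumptions:

def threshold_zones(df, polarity_zone, subjectivity_zone):
    # Assumed behaviour: label each post by its (polarity, subjectivity)
    # region. Only 'po', 'ps' and 'nto' are attested in Example #3; the
    # other labels and the cutoffs themselves are illustrative guesses.
    pos = df['PostTextPolarity'] > polarity_zone
    neg = df['PostTextPolarity'] < -polarity_zone
    subj = df['PostTextSubjectivity'] > subjectivity_zone
    df.loc[pos & ~subj, 'zone'] = 'po'            # positive, objective
    df.loc[pos & subj, 'zone'] = 'ps'             # positive, subjective
    df.loc[neg & ~subj, 'zone'] = 'no'            # negative, objective
    df.loc[neg & subj, 'zone'] = 'ns'             # negative, subjective
    df.loc[~pos & ~neg & ~subj, 'zone'] = 'nto'   # neutral, objective
    df.loc[~pos & ~neg & subj, 'zone'] = 'nts'    # neutral, subjective
    return df
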
def dataset_allpost(type):
    if type == 'old':
        # Load the text dataset, drop non-text posts (null text features), assign UserId
        all_df = ld.all_post()
        all_df = all_df[['postId', 'PostTextLength', 'PostTextPolarity', 'PostTextSubjectivity']].dropna()
        all_df['UserId'] = all_df['postId'].str.split('_').str.get(0).astype(int)
        all_df.drop('postId', axis=1, inplace=True)

    elif type == 'new':
        # Load the new dataset, drop non-text posts (null text features), assign UserId
        ds_file_array = ['data/english_foodgroup_new.json', 'data/english_TEDtranslate_new.json',
                         'data/english_traveladdiction_new.json']
        photo_file = ['data/album_english_foodgroups.json', 'data/album_english_TEDtranslate.json',
                      'data/album_english_traveladdiction.json']
        friendsnum_file = ['data/english_foodgroups_friendsnum.json', 'data/english_TEDtranslate_friendsnum.json',
                           'data/english_traveladdiction_friendsnum.json']

        all_df = ld.new_dataset(ds_file_array)
        # print all_df.dtypes
        all_df = all_df[['UserID', 'PostTextLength', 'PostTextPolarity', 'PostTextSubjectivity']].dropna()
        all_df['UserId'] = all_df['UserID']
        all_df.drop('UserID', axis=1, inplace=True)

    return all_df
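
An illustrative call of the two loaders above; the cutoff values 0.0 and 0.5 are arbitrary choices for this sketch, not values taken from the project:

# Per-user zone counts plus the ActiveInterests label.
aggr_df, zone_cols = dataset('old', polarity_zone=0.0, subjectivity_zone=0.5)
X = aggr_df[zone_cols]
y = aggr_df['ActiveInterests']

# Per-post text features, no aggregation.
raw_df = dataset_allpost('old')
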
Example #3

# LOAD THE DATASET
# ds_file = 'data/english_foodgroup_new.json'
# ds_file = 'data/english_TEDtranslate_new.json'
# ds_file = 'data/english_traveladdiction_new.json'
ds_file = ['data/english_foodgroup_new.json', 'data/english_TEDtranslate_new.json', 'data/english_traveladdiction_new.json']

# profilephoto
# photo_file = 'data/album_english_foodgroups.json'
# photo_file = 'data/album_english_TEDtranslate.json'
# photo_file = 'data/album_english_traveladdiction.json'
photo_file = ['data/album_english_foodgroups.json', 'data/album_english_TEDtranslate.json', 'data/album_english_traveladdiction.json']
friendsnum_file = ['data/english_foodgroups_friendsnum.json', 'data/english_TEDtranslate_friendsnum.json', 'data/english_traveladdiction_friendsnum.json']

all_df = ld.new_dataset(ds_file)
all_df = all_df[['UserID', 'LikesCount', 'SharesCount', 'CommentsCount', 'PostTextLength', 'PostTextPolarity', 'PostTextSubjectivity', 'ActiveInterests']].dropna()
# print all_df.dtypes
# quit()

# separate into zone
all_df['zone'] = 'a'
"""
# zone po
all_df['zone'].loc[ ((all_df['PostTextPolarity'] > 0) & (all_df['PostTextPolarity'] <= 1.0)) &
                    ((all_df['PostTextSubjectivity'] >= 0) & (all_df['PostTextSubjectivity'] <= 0.5)) ] = 'po'
# zone ps
all_df['zone'].loc[ ((all_df['PostTextPolarity'] > 0) & (all_df['PostTextPolarity'] <= 1.0)) &
                    ((all_df['PostTextSubjectivity'] > 0.5) & (all_df['PostTextSubjectivity'] <= 1.0)) ] = 'ps'
# zone nto
all_df['zone'].loc[ ((all_df['PostTextPolarity'] == 0)) &
                    ((all_df['PostTextSubjectivity'] >= 0) & (all_df['PostTextSubjectivity'] <= 0.5)) ] = 'nto'  # subjectivity bounds assumed, mirroring 'po'
"""
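
The disabled block above assigns zones through chained all_df['zone'].loc[...] indexing, which pandas flags with SettingWithCopyWarning. An equivalent rewrite with numpy.select, covering only the three zones the snippet shows before it truncates:

pol = all_df['PostTextPolarity']
sub = all_df['PostTextSubjectivity']
conditions = [
    (pol > 0) & (pol <= 1.0) & (sub >= 0) & (sub <= 0.5),   # po
    (pol > 0) & (pol <= 1.0) & (sub > 0.5) & (sub <= 1.0),  # ps
    (pol == 0) & (sub >= 0) & (sub <= 0.5),                 # nto
]
all_df['zone'] = np.select(conditions, ['po', 'ps', 'nto'], default='a')
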
Example #4
def dataset(type, polarity_zone, subjectivity_zone):
    if type == 'old':
        # Load the text dataset, drop non-text posts (null text features), assign UserId
        all_df = ld.all_post()
        all_df = all_df[['postId', 'LikesCount', 'SharesCount', 'CommentsCount', 'PostTextLength', 'PostTextPolarity', 'PostTextSubjectivity', 'PostTime']].dropna()
        all_df['UserId'] = all_df['postId'].str.split('_').str.get(0).astype(int)
        all_df['zone'] = 'a'

        all_df = threshold_zones(all_df, polarity_zone, subjectivity_zone)

        zone_dummies = pd.get_dummies(all_df['zone'])
        column_zone_dummies = zone_dummies.columns.tolist()

        ent_ = [x + '_ratio' for x in column_zone_dummies]
        ent_dummies = pd.get_dummies(all_df['entropy_all'])

        all_df = all_df.join(ent_dummies)
        all_df = all_df.join(zone_dummies)

        dict_aggr = {x: np.sum for x in column_zone_dummies}
        dict_aggr.update({x: np.mean for x in ent_})

        aggr_df = all_df.groupby(['UserId'], sort=True).agg(dict_aggr).reset_index()

        # add day part
        day_part_df = ld.func_day_part(all_df)
        aggr_df = pd.merge(aggr_df, day_part_df, how='inner', left_on='UserId', right_on='UserId')

        # Load the gold-standard file
        gs_df = pd.read_csv('data/userlevel_all_features_1007.csv', header=0)

        # Merge the zone features with the gold standard
        aggr_df = pd.merge(aggr_df, gs_df, how='inner', left_on='UserId', right_on='UserNum')
        # aggr_df = aggr_df[['UserId', 'ActiveInterests']+column_zone_dummies+ent_]

        # should be merged first, then turn ActiveInterests into dummies
    elif type == 'new':
        # Load the new dataset, drop non-text posts (null text features), assign UserId
        ds_file_array = ['data/english_foodgroup_new.json', 'data/english_TEDtranslate_new.json',
                         'data/english_traveladdiction_new.json']
        photo_file = ['data/album_english_foodgroups.json', 'data/album_english_TEDtranslate.json',
                      'data/album_english_traveladdiction.json']
        friendsnum_file = ['data/english_foodgroups_friendsnum.json', 'data/english_TEDtranslate_friendsnum.json',
                           'data/english_traveladdiction_friendsnum.json']

        all_df = ld.new_dataset(ds_file_array)
        # print all_df.dtypes
        all_df = all_df[['UserID', 'LikesCount', 'SharesCount', 'CommentsCount', 'PostTextLength', 'PostTextPolarity', 'PostTextSubjectivity', 'ActiveInterests', 'PostTime']].dropna()
        all_df['UserId'] = all_df['UserID']

        all_df['zone'] = 'a'
        all_df = threshold_zones(all_df, polarity_zone, subjectivity_zone)

        zone_dummies = pd.get_dummies(all_df['zone'])
        column_zone_dummies = zone_dummies.columns.tolist()

        ent_ = [x + '_ratio' for x in column_zone_dummies]
        ent_dummies = pd.get_dummies(all_df['entropy_all'])

        all_df = all_df.join(ent_dummies)
        all_df = all_df.join(zone_dummies)

        dict_aggr = {x: np.sum for x in column_zone_dummies}
        dict_aggr.update({'ActiveInterests': np.min})
        dict_aggr.update({x: np.mean for x in ent_})

        aggr_df = all_df.groupby(['UserId'], sort=True).agg(dict_aggr).reset_index()

        # obtain NoPosts, SharedNewsSum, UploadVideoSum
        df_1 = ld.aggr_feature_user(ds_file_array)
        aggr_df = pd.merge(aggr_df, df_1, how='inner', left_on='UserId', right_on='UserID')

        # obtain about
        df_2 = ld.about_dataset(ds_file_array)
        aggr_df = pd.merge(aggr_df, df_2, how='inner', left_on='UserId', right_on='UserID')

        # UserID, NoProfilePhotos, NoCoverPhotos, NoUploadedPhotos, NoPhotos
        df_3 = ld.photo_dataset(photo_file)
        aggr_df = pd.merge(aggr_df, df_3, how='inner', left_on='UserId', right_on='UserID')

        # NumOfFriends
        df_4 = ld.friendsnum_dataset(friendsnum_file)
        aggr_df = pd.merge(aggr_df, df_4, how='inner', left_on='UserId', right_on='UserID')

        # day_part
        df_5 = ld.func_day_part(all_df)
        aggr_df = pd.merge(aggr_df, df_5, how='inner', left_on='UserId', right_on='UserId')

        aggr_df.drop(['userId', 'UserID_y', 'UserID_x'], axis=1, inplace=True)
        # print 'new dataset columns', aggr_df.dtypes

    aggr_df['frequent_day_part'] = aggr_df['frequent_day_part'].map(
        {'Early_Morning': 0, 'Morning': 1, 'Afternoon': 2, 'Evening': 3})
    # aggr_df = all_df.groupby(['UserId'], sort=True).agg(dict_aggr).reset_index()
    # add entropy features
    aggr_df = entropy_features_(aggr_df, ent_)
    return aggr_df, column_zone_dummies #, target
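
entropy_features_ and the '<zone>_ratio' columns are produced elsewhere in the project. One plausible reading, offered only as a sketch: treat each user's ratio columns as a distribution over zones and add its Shannon entropy (the function name comes from the call above; the formula and the output column name are assumptions):

def entropy_features_(aggr_df, ratio_cols):
    # Assumed: each '<zone>_ratio' column holds the user's share of posts
    # in that zone, so each row forms a probability distribution (after
    # normalisation) whose entropy says how evenly the user posts.
    probs = aggr_df[ratio_cols].clip(lower=0)
    probs = probs.div(probs.sum(axis=1), axis=0).fillna(0.0)
    with np.errstate(divide='ignore', invalid='ignore'):
        plogp = np.where(probs > 0, probs * np.log2(probs), 0.0)
    aggr_df['zone_entropy'] = -plogp.sum(axis=1)
    return aggr_df
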
Example #5
def dataset_raw(type):
    if type == 'old':
        all_df = ld.all_post()
        all_df = all_df[['postId', 'PostTextLength', 'PostTextPolarity', 'PostTextSubjectivity']].dropna()
        all_df['UserId'] = all_df['postId'].str.split('_').str.get(0).astype(int)

        # Load the gold-standard file
        gs_df = pd.read_csv('data/userlevel_all_features_1007.csv', header=0)
        gs_df['UserId'] = gs_df['UserNum']
        gs_df = gs_df[['UserId', 'ActiveInterests']]

        # add PostTextLengthLevel
        postTextLengthLevel_df = ld.separate_postTextLength(all_df)
        print(postTextLengthLevel_df)
        aggr_df = pd.merge(gs_df, postTextLengthLevel_df, how='inner', left_on='UserId', right_on='UserId')

        """
        # add day part
        day_part_df = ld.func_day_part(all_df)
        aggr_df = pd.merge(aggr_df, day_part_df, how='inner', left_on='UserId', right_on='UserId')
        """
        # should be merged first, then turn ActiveInterests into dummies
    elif type == 'new':
        # Load the new dataset, drop non-text posts (null text features), assign UserId
        ds_file_array = ['data/english_foodgroup_new.json', 'data/english_TEDtranslate_new.json',
                         'data/english_traveladdiction_new.json']
        photo_file = ['data/album_english_foodgroups.json', 'data/album_english_TEDtranslate.json',
                      'data/album_english_traveladdiction.json']
        friendsnum_file = ['data/english_foodgroups_friendsnum.json', 'data/english_TEDtranslate_friendsnum.json',
                           'data/english_traveladdiction_friendsnum.json']

        all_df = ld.new_dataset(ds_file_array)
        # print all_df.dtypes
        # all_df = all_df[['UserID', 'LikesCount', 'SharesCount', 'CommentsCount', 'PostTextLength', 'PostTextPolarity','PostTextSubjectivity', 'ActiveInterests', 'PostTime']].dropna()
        all_df['UserId'] = all_df['UserID']
        all_df = all_df[['UserId', 'ActiveInterests', 'PostTextLength']].dropna()

        dict_aggr = {'ActiveInterests': np.min}
        aggr_df = all_df.groupby(['UserId'], sort=True).agg(dict_aggr).reset_index()

        # add PostTextLengthLevel
        postTextLengthLevel_df = ld.separate_postTextLength(all_df)
        aggr_df = pd.merge(aggr_df, postTextLengthLevel_df, how='inner', left_on='UserId', right_on='UserId')

        # aggr_df = pd.merge(aggr_df, postTextLengthLevel_df, how='inner', left_on='UserId', right_on='UserID')

        """
        # obtain NoPosts, SharedNewsSum, UploadVideoSum
        df_1 = ld.aggr_feature_user(ds_file_array)
        aggr_df = pd.merge(aggr_df, df_1, how='inner', left_on='UserId', right_on='UserID')

        # obtain about
        df_2 = ld.about_dataset(ds_file_array)
        aggr_df = pd.merge(aggr_df, df_2, how='inner', left_on='UserId', right_on='UserID')

        # UserID, NoProfilePhotos, NoCoverPhotos, NoUploadedPhotos, NoPhotos
        df_3 = ld.photo_dataset(photo_file)
        aggr_df = pd.merge(aggr_df, df_3, how='inner', left_on='UserId', right_on='UserID')

        # NumOfFriends
        df_4 = ld.friendsnum_dataset(friendsnum_file)
        aggr_df = pd.merge(aggr_df, df_4, how='inner', left_on='UserId', right_on='UserID')

        # day_part
        df_5 = ld.func_day_part(all_df)
        aggr_df = pd.merge(aggr_df, df_5, how='inner', left_on='UserId', right_on='UserId')

        aggr_df.drop(['userId', 'UserID', 'UserID_y', 'UserID_x'], axis=1, inplace=True)
        # print 'new dataset columns', aggr_df.dtypes

        aggr_df['frequent_day_part'] = aggr_df['frequent_day_part'].map(
            {'Early_Morning': 0, 'Morning': 1, 'Afternoon': 2, 'Evening': 3})
        # aggr_df = all_df.groupby(['UserId'], sort=True).agg(dict_aggr).reset_index()
        # add entropy features
        aggr_df = entropy_features_(aggr_df, ent_)
        """
    # return all_df
    return aggr_df
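
ld.func_day_part, used in Examples #4 and #5, is also external to these snippets. Given the Early_Morning/Morning/Afternoon/Evening mapping applied to its output, a minimal sketch, assuming PostTime parses as a timestamp and that the function reports each user's most frequent posting window; the 6-hour bucket boundaries are assumptions:

def func_day_part(all_df):
    # Assumed buckets: [0, 6) Early_Morning, [6, 12) Morning,
    # [12, 18) Afternoon, [18, 24) Evening.
    hours = pd.to_datetime(all_df['PostTime']).dt.hour
    labels = ['Early_Morning', 'Morning', 'Afternoon', 'Evening']
    part = pd.cut(hours, bins=[0, 6, 12, 18, 24], labels=labels, right=False)
    out = (all_df.assign(day_part=part)
                 .groupby('UserId')['day_part']
                 .agg(lambda s: s.value_counts().idxmax())
                 .rename('frequent_day_part')
                 .reset_index())
    return out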