Example No. 1
def mergeComJob(comPath, jobPath):
    comList = util.read_list(comPath)
    jobList = util.read_list(jobPath)
    jobDict = {}
    for job in jobList:
        if job['companyId'] not in jobDict:
            jobDict[job['companyId']] = []
        jobDict[job['companyId']].append(job)

    return comList, jobDict
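All of these examples depend on a project-specific util.read_list helper that is never shown on this page, and its behaviour clearly varies between repositories (Example No. 12, for instance, uses it to load NumPy .npy arrays). Purely as an illustrative sketch of the flavour assumed by Example No. 1, where each item behaves like a dict with a 'companyId' key, a JSON-lines style reader could look like this (hypothetical code, not taken from any of the repositories above):

import json

def read_list(path):
    # Hypothetical helper: one record per line, parsed as JSON when possible,
    # otherwise kept as a plain string.
    items = []
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            try:
                items.append(json.loads(line))
            except ValueError:
                items.append(line)
    return items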
Example No. 2
def backup_reload(save_first):
    global mod_list, comment_accs, comment_texts, suggestions

    if save_first:
        util.save_list_as(mod_list, os.path.join('data', 'mod_list'))
        util.save_list_as(comment_accs, os.path.join('data', 'comment_accs'))
        util.save_list_as(comment_texts, os.path.join('data', 'comment_texts'))
        util.save_list_as(suggestions, os.path.join('data', 'suggestions'))

    mod_list = util.read_list(os.path.join('data', 'mod_list'))
    comment_accs = util.read_list(os.path.join('data', 'comment_accs'))
    comment_texts = util.read_list(os.path.join('data', 'comment_texts'))
    suggestions = util.read_list(os.path.join('data', 'suggestions'))
Example No. 3
def main(args):
    image_paths = read_list(args.image_list)
    for path in image_paths:
        # landmarks_file should have the same prefix as image_file
        landmarks_file = path[:-3] + 'txt'
        im = Image.open(path)
        width, height = im.size
        landmarks = load_landmarks(landmarks_file)
        landmarks[:,1] = height - landmarks[:,1]
        # select contour points
        #contour_points = get_contour_side(landmarks)
        # generate a contour curve with contour points
        hull = ConvexHull(landmarks)
        # draw landmarks
        lm = np.array(im)
        for i in range(landmarks.shape[0]):
            rr, cc = draw.circle(height-landmarks[i,1].astype('int32'), landmarks[i,0].astype('int32'), 5)
            lm[rr, cc, :] = np.array((255, 0, 0))
        # create mask
        mask = np.zeros((height, width))
        rr, cc = draw.polygon(height-landmarks[hull.vertices,1], landmarks[hull.vertices,0], mask.shape)
        mask[rr,cc] = 1

        path = path[:-1] if path[-1] == '/' else path  # strip a trailing slash
        image_name = path[path.rindex('/')+1:-4] + '_contour.png'
        show_result(lm, mask, np.tile((mask!=0)[:,:,np.newaxis], (1,1,3)) * im, save=True, filename='images/'+image_name)

        # add CRF
        prob = np.concatenate(((1-mask)[np.newaxis,:,:]*0.9 +
                               mask[np.newaxis, :, :]*0.1,
                               mask[np.newaxis, :, :]*0.9 +
                               (1-mask)[np.newaxis, :, :]*0.1), axis=0)
        map = CRF(prob, np.array(im))
Example No. 4
def main(args):
    image_paths = read_list(args.image_list)
    for path in image_paths:
        im = open_image(path)
        # resize for memory
        width, height = im.size
        if height > 800:
            im = im.resize((int(800 * width / height), 800))
            width, height = im.size

        # use 2D-FAN detect landmarks
        fa = FaceAlignment(LandmarksType._2D,
                           enable_cuda=True,
                           flip_input=False,
                           use_cnn_face_detector=True)
        try:
            landmarks = fa.get_landmarks(np.array(im))[-1]
            landmarks[:, 1] = height - landmarks[:, 1]
        except:
            continue

        # generate a contour curve with contour points
        hull = ConvexHull(landmarks)
        # draw landmarks
        lm = np.array(im)
        for i in range(landmarks.shape[0]):
            rr, cc = draw.circle(height - landmarks[i, 1].astype('int32'),
                                 landmarks[i, 0].astype('int32'), 5)
            lm[rr, cc, :] = np.array((255, 0, 0))
        # create mask
        mask = np.zeros((height, width))
        rr, cc = draw.polygon(height - landmarks[hull.vertices, 1],
                              landmarks[hull.vertices, 0], mask.shape)
        mask[rr, cc] = 1

        save = (args.save == 'True')
        path = path[:-1] if path[-1] == '/' else path  # strip a trailing slash
        image_name = path[path.rindex('/') + 1:-4] + '_contour_nocrf.png'
        show_result(lm,
                    mask,
                    np.tile((mask != 0)[:, :, np.newaxis], (1, 1, 3)) * im,
                    save=save,
                    filename='images/' + image_name)

        # add CRF
        #prob = np.concatenate(((1-mask)[np.newaxis,:,:]*0.9 + mask[np.newaxis, :, :]*0.1, mask[np.newaxis, :, :]*0.9 + (1-mask)[np.newaxis, :, :]*0.1), axis=0)
        prob = ndimage.gaussian_filter(mask * 1.0, sigma=5)
        prob = np.concatenate(
            ((1 - prob)[np.newaxis, :, :], prob[np.newaxis, :, :]), axis=0)

        map = CRF(prob, np.array(im))
        image_name = path[path.rindex('/') + 1:-4] + '_contour_crf.png'
        show_result(im,
                    map,
                    np.tile((map != 0)[:, :, np.newaxis], (1, 1, 3)) * im,
                    save=save,
                    filename='images/' + image_name)
Example No. 5
def add_column(s_file, R, s_name, l_separator=True):
    """Add an extra column using value R array to an existing heat map.
    s_file: str, file name without extension, it will modify .cdt and .atr
    R: array(int/float), values to add
    s_name: str, column name
    l_separator: bool, default True. If True, add a column of blank value to separate the new column from existing ones."""
    if re.search(r'\.\w{3}$', s_file):
        s_file = s_file[:-4]
    if not os.path.exists(s_file + '.cdt'):
        util.error_msg("File does not exist: " + s_file + ".cdt!")
    f = open(s_file + '.cdt')
    S = []
    cnt = 0

    while True:
        line = f.readline()
        if not line: break
        SS = line.strip().split("\t")
        if SS[0].startswith('GENE'):
            if l_separator:
                SS.append('')
            SS.append('%.2f' % R[cnt])
            cnt += 1
        elif SS[0] == 'GID':
            if l_separator:
                SS.append('separator')
            SS.append(s_name)
        elif SS[0] == 'AID':
            X = [int(re.sub(r'\D', '', x)) for x in SS if x.startswith('ARRY')]
            n_array = max(X) + 1
            SS.append('ARRY%dX' % n_array)
            if l_separator:
                SS.append('ARRY%dX' % (n_array + 1))
        elif SS[0] == 'EWEIGHT':
            if l_separator:
                SS.append('0')
            SS.append('0')
        S.append(SS)
    f.close()
    S = ["\t".join(X) for X in S]
    util.save_list(s_file + '.cdt', S, s_end="\n")

    if os.path.exists(s_file + '.atr'):
        S = util.read_list(s_file + '.atr')
        SS = S[-1].split("\t")
        n_node = int(re.sub(r'\D', '', SS[0])) + 1
        S.append('NODE%dX\tNODE%dX\tARRY%dX\t0' %
                 (n_node, n_node - 1, n_array))
        if l_separator:
            S.append('NODE%dX\tNODE%dX\tARRY%dX\t0' %
                     (n_node + 1, n_node, n_array + 1))
        util.save_list(s_file + '.atr', S, s_end="\n")
Example No. 6
def main():
    api = setup_api(consumer_key, consumer_secret, access_key, access_secret)
    file_path = 'Name of List accounts'
    accounts = read_list(file_path)
    for account in accounts:
        try:
            friend = api.create_friendship(account)
            if friend.screen_name == account:
                print 'Follow ' + account + ' success'
            else:
                print 'Follow ' + account + ' failed'
        except tweepy.TweepError, e:
            print e
Example No. 7
def main():
    api = setup_api(consumer_key, consumer_secret, access_key, access_secret)
    file_path = 'Name of List accounts'
    accounts = read_list(file_path)
    for account in accounts:
        try:
            friend = api.create_friendship(account)
            if friend.screen_name == account:
                print 'Follow ' + account + ' success'
            else:
                print 'Follow ' + account + ' failed'
        except tweepy.TweepError, e:
            print e
Example No. 8
def main():
    api = setup_api(consumer_key, consumer_secret, access_key, access_secret)

    file_path = "tweets.txt"
    tweet_data = read_list(file_path)
    tweet_text = pick_random_element(tweet_data)
    # get target
    target_accounts = api.friends()
    done = False
    while not done:
        target_account = pick_random_element(target_accounts)
        is_followed = is_followed_by(api, target_screen_name=target_account.screen_name)
        if is_good_account(target_account) and not is_followed:
            reply(api, tweet_text, target_account)
            done = True
Example No. 9
def main():
    api = setup_api(consumer_key, consumer_secret, access_key, access_secret)
    file_path = 'famous_accounts.txt'
    accounts = read_list(file_path)
    for account in accounts:
        print '### From follower of ', account
        try:
            followers = api.followers(account)
            for follower in followers:
                if is_good_account(follower):
                    print follower.screen_name
                    try:
                        friend = api.create_friendship(follower.screen_name)
                        if friend.screen_name == follower.screen_name:
                            print 'Follow ' + follower.screen_name + ' success'
                        else:
                            print 'Follow ' + follower.screen_name + ' failed'
                    except tweepy.TweepError, e:
                        print e
        except Exception, e:
            print e
Example No. 10
def parse_hp(self, s_file):
    # Parses the human phenotype ontology file (assumes an OBO-style .txt dump such as "hp.txt").
    pc_dict = {}  # parent-child dictionary (key: parent, value: children)
    S = util.read_list(s_file)
    i = 0
    n = len(S)
    while i < n:
        while i < n and S[i].rstrip() != '[Term]':
            i += 1
        if i >= n:
            break
        i += 1
        while i < n and S[i].rstrip() != '':
            contents = S[i].rstrip().split(': ')
            if contents[0] == 'id':
                id = contents[1]
            elif contents[0] == 'is_a':
                parent = contents[1].split(' ! ')[0]
                if parent not in pc_dict:
                    pc_dict[parent] = []
                pc_dict[parent].append(id)
            i += 1
    return pc_dict
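Example No. 10 assumes the input is an OBO-format ontology dump whose [Term] stanzas carry id: and is_a: lines. As a purely illustrative, self-contained variant of the same idea (not the repository's code), here is the parsing logic applied to an in-memory stanza, together with the parent-to-children mapping it produces:

def parse_terms(lines):
    # Same parsing idea as parse_hp above, over an in-memory list of lines.
    pc = {}
    term_id = None
    for line in lines:
        line = line.rstrip()
        if line == '[Term]':
            term_id = None
        elif line.startswith('id: '):
            term_id = line[len('id: '):]
        elif line.startswith('is_a: ') and term_id is not None:
            parent = line[len('is_a: '):].split(' ! ')[0]
            pc.setdefault(parent, []).append(term_id)
    return pc

sample_stanza = [
    '[Term]',
    'id: HP:0000002',
    'name: Abnormality of body height',
    'is_a: HP:0001507 ! Growth abnormality',
    '',
]
print(parse_terms(sample_stanza))  # {'HP:0001507': ['HP:0000002']}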
Example No. 11
def main():
    # Step 1: get links for projects
    if len(sys.argv) > 1 and sys.argv[1] == "list":
        driver = webdriver.Firefox()
        projects = list_projects.get_project_links(driver)
        util.write_list(projects)
        driver.close()
    else:
        projects = util.read_list()

    # Step 2: get data for every project
    output = dict()
    output["records"] = {"record": []}

    for project_index, project_link in enumerate(projects):
        output["records"]["record"].append(
            get_data.get_data_from_url(project_link, project_index + 1))
        print("Crawled:\t%d/%d" % (project_index + 1, len(projects)))

        # Have mercy on KickStarter :)
        time.sleep(MERCY_TIME)

    # Write into JSON file
    util.write_dict(output)
Example No. 12
# RNN Size
rnn_size = 64
# Number of Layers
num_layers = 2
# Learning Rate
learning_rate = 0.0001
# Dropout Keep Probability
keep_probability = 0.8
# Display Step
display_step = 10

save_path = 'checkpoints/sup/actor/dev'
save_path_critic = 'checkpoints/sup/critic/dev'

(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = util.load_preprocess('preprocess.p')
load_path_actor = util.load_params('params_actor_sup.p')

source_train = util.read_list('source_train.npy')
target_train = util.read_list('target_train.npy')
valid_size = batch_size * 10
train_source = source_train[valid_size:]
train_target = target_train[valid_size:]
valid_source = source_train[:valid_size]
valid_target = target_train[:valid_size]

test_acc_list = []

train_graph = tf.Graph()
critic_graph = tf.Graph()
actor_graph = tf.Graph()

train_sess = tf.Session(graph=train_graph)
critic_sess = tf.Session(graph=critic_graph)
Example No. 13
def allele_validation(allele):
    if not allele:
        allele = util.read_list(util.DEFAULT_ALLELE_LIST[1])

    return allele
Example No. 14
import message

LINKS_FILE = 'state/chan_link_persistent.txt'

link, link_install, uninstall = util.LinkSet().triple()

# c2.lower() in links[c1.lower()] iff c1 is broadcast to c2.
links = dict()

# c1.lower() in decay_links if c1's link will eventually decay.
decay_links = set()

# (c1.lower(), c2.lower()) in persistent_links if a link from c1 to c2
# will be created upon the next restart.
try:
    persistent_links = set(util.read_list(LINKS_FILE))
except IOError:
    persistent_links = set()

def install(bot):
    for c1, c2 in persistent_links:
        c1, c2 = c1.lower(), c2.lower()
        if c1 not in links: links[c1] = set()
        links[c1].add(c2)
    link_install(bot)

def reload(prev):
    if hasattr(prev, 'links') and isinstance(prev.links, dict):
        links.update(prev.links)
    if hasattr(prev, 'decay_links') and isinstance(prev.decay_links, set):
        for decay_chan in prev.decay_links:
Example No. 15
from datetime import datetime
from itertools import *
import re

from untwisted.magic import sign

from runtime import later
import util
import channel
import message

link, install, uninstall = util.LinkSet().triple()

bridges = util.read_list('conf/bridge.py', default=True)
substitutions = util.read_list('conf/substitute.py', default=True)


@link('IRC')
@link('MINECRAFT')
@link('TERRARIA')
def h_msg(bot, source, msg, source_name=None, **kwds):
    if source is None:
        return
    if type(msg) is unicode:
        msg = msg.encode('utf8')
    for source, target in targets(source):
        name = source_name or source
        pmsg = '%s: %s' % (name, msg)
        yield sign('BRIDGE', bot, target, pmsg, source, **kwds)

def notice(bot, source, head, *args, **kwds):
Example No. 16
def read_rolls():
    return util.read_list(LOG_FILE, default=True)
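Examples No. 14 through 16 handle a missing file in two different ways: Example No. 14 wraps util.read_list in try/except IOError and falls back to an empty collection, while Examples No. 15 and 16 pass default=True, presumably asking the helper to do that fallback itself. A hedged sketch of what such a keyword might do (names and behaviour are assumptions, not the actual library code):

def read_list(path, default=False):
    # Hypothetical behaviour of the default flag: with default=True a missing
    # file is not an error, it simply yields an empty list, mirroring the
    # explicit try/except IOError around the same call in Example No. 14.
    try:
        with open(path) as f:
            return [line.rstrip('\n') for line in f]
    except IOError:
        if default:
            return []
        raise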
Example No. 17
    #Cache.load(tax_id=0, l_use_GPDB=True)
    Cache.load(tax_id=0, S_DB=['BioGrid','GeneGO'], l_use_GPDB=True)
    Cache.info()
    exit()
    ppi = PPI(l_use_GPDB=True, tax_id=9606)
    print(list(Cache.ppi_data['GPDB'].keys()))
    #ppi.T_node.to_csv('t1.csv')
    #ppi.T_edge.to_csv('t2.csv')
    print(ppi.data['132884'])
    S_node = ['132884', '191', '537']
    test = ppi.subnetwork(S_node)
    print(test.nof_nodes())
    exit()

    ## example
    S_node = util.read_list('~/RM_Hits.txt')
    test = ppi.subnetwork(S_node)
    test.to_xgmml('RM_.xgmml')
    exit()
    S_node = util.read_list('~/CM_Hits.txt')
    test = ppi.subnetwork(S_node)
    test.to_xgmml('CM_.xgmml')

    exit()
    #print ppi.T_node[:5]
    #print ppi.T_edge[:5]
    test = ppi.subnetwork(S_node)
    #print test
    exit()

    mc = MCODE(net)
Example No. 18
import tensorflow as tf
import numpy as np
import util

# Batch Size
batch_size = 128

_, (source_vocab_to_int, target_vocab_to_int), (
    source_int_to_vocab,
    target_int_to_vocab) = util.load_preprocess('preprocess.p')
load_path_sup = util.load_params('params_actor_sup.p')
load_path_actor = util.load_params('params_actor_reinforce.p')

source_test = util.read_list('source_test.npy')
target_test = util.read_list('target_test.npy')

test_acc_list = []

loaded_graph_sup = tf.Graph()
loaded_graph_actor = tf.Graph()
sup_sess = tf.Session(graph=loaded_graph_sup)
actor_sess = tf.Session(graph=loaded_graph_actor)
with sup_sess.as_default():
    with loaded_graph_sup.as_default():
        # Load saved model and restore the saved variables
        loader = tf.train.import_meta_graph(load_path_sup + '.meta')
        loader.restore(sup_sess, load_path_sup)

        input_data = loaded_graph_sup.get_tensor_by_name('input:0')
        logits = loaded_graph_sup.get_tensor_by_name('predictions:0')
        target_sequence_length = loaded_graph_sup.get_tensor_by_name(
Example No. 19
        print(idx)


def processJobData(job):
    job['postTime'] = time.strftime('%Y-%m-%d %H:%M:%S',
                                    time.localtime(time.time()))
    job['requireDegree'] = random.randint(1, 3)
    job['minSalary'] = random.randint(8, 12)
    job['maxSalary'] = job['minSalary'] + random.randint(1, 4)
    job['minExperience'] = random.randint(1, 5)
    job['maxExperience'] = job['minExperience'] + random.randint(1, 2)
    job['workType'] = 2


def processComData(com):
    com['type'] = random.randint(1, 3)


if __name__ == '__main__':
    dbh = DBHelper.DBHelper()
    jobList = util.read_list('jobs/jobs_1_31.txt')
    for job in jobList:
        processJobData(job)
    dbh.insertJob(jobList)
    # print(len(jobList))
    # mergeInDB('company/lagou_com_total.txt', 'jobs/lagou_jobs_100.txt')
    # mergeInDB('company/lagou_com_total.txt', 'jobs/lagou_jobs_200.txt')
    # mergeInDB('company/lagou_com_total.txt', 'jobs/lagou_jobs_400-600.txt')

# mergeInDB('lagou_com.txt','lagou_com_jobs.txt')