Example #1
    def write_request_to_log(self, dev):
        '''Records the subvolume requested of this sample in a log'''
        if self.log is not None:
            log_line1 = self.sec_name
            log_line2 = "subvolume: [{},{},{}] requested".format(
                dev[0], dev[1], dev[2])
            utils.write_to_log(self.log, log_line1)
            utils.write_to_log(self.log, log_line2)
def do_point_query(points, raster, csv_pth, num, log_out):
    try:
        u.verify_dir(csv_pth)
        dataset = raster.split('\\')[-2].replace(' ', '_')
        raster_name_to_csv = os.path.basename(raster).replace('.tif', '.csv')
        csv_name = '__'.join([dataset, raster_name_to_csv])
        csv_out = os.path.join(csv_pth, csv_name)
        
        start = timer()
        u.write_to_log('  {}) Raster: {}'.format(num, os.path.basename(raster)), log_out)
        
        stats = point_query(points, raster, interpolate='nearest', geojson_out=True)
        print('    point_query... ({} sec.)'.format(round(timer()-start, 2)))

        start = timer()
        attributes = []
        for item in stats:
            #print ('{}'.format(item['properties']))
            attributes.append(item['properties'])
        print('    append dicts... ({} sec.)'.format(round(timer()-start, 2)))

        start = timer()
        with open(csv_out, 'w', newline='') as outfile:
            fp = csv.DictWriter(outfile, attributes[0].keys())
            fp.writeheader()
            fp.writerows(attributes)
        print('    write to csv... ({} sec.)'.format(round(timer()-start, 2)))
        u.write_to_log('    CSV file: {}'.format(csv_out), log_out)
        u.write_to_log('    Log file: {}'.format(log_out), log_out)
        
    except Exception as e:
        u.write_to_log(str(e), log_out)
        u.write_to_log('FINISH POINTS: {}'.format(time.strftime("%Y-%m-%d  %H:%M:%S")), log_out)
    def get_initial_company_info():
        """Gets the initial information for each company"""

        company_dict = utils.open_json(MONITOR)

        for company in company_dict:
            # Gets symbol for company
            if company_dict[company]["Symbol"] == "unknown":
                try:
                    with urllib.request.urlopen(
                            f'https://finance.yahoo.com/_finance_doubledown/'
                            f'api/resource/searchassist;searchTerm={company}'
                    ) as response:

                        html = response.read().decode()
                        d = json.loads(html)

                        company_dict[company]["Symbol"] = d['items'][0][
                            'symbol']

                except urllib.error.HTTPError as error:
                    utils.write_to_log(f'Error opening URL: {error}')

            # Gets initial share price
            if company_dict[company]["Initial-share-price"] == 1:
                yahoo = Share(company_dict[company]["Symbol"])
                share = yahoo.get_price()
                company_dict[company]["Initial-share-price"] = float(share)
                company_dict[company]["Current-share-price"] = float(share)

        utils.write_to_json(MONITOR, company_dict)
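
Several of the snippets here round-trip a JSON file through utils.open_json and utils.write_to_json. Those helpers are not shown anywhere on this page; minimal sketches consistent with the call sites (the indent width is an assumption) might be:

import json

def open_json(path):
    """Load a JSON file into a dict."""
    with open(path) as f:
        return json.load(f)

def write_to_json(path, data):
    """Write a dict back out as indented JSON."""
    with open(path, 'w') as f:
        json.dump(data, f, indent=4)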
Example #4
    def write_request_to_log(self, dev):
        '''Records the subvolume requested of this sample in a log'''
        if self.log is not None:
            log_line1 = self.sec_name
            log_line2 = "subvolume: [{},{},{}] requested".format(
                dev[0], dev[1], dev[2])
            utils.write_to_log(self.log, log_line1)
            utils.write_to_log(self.log, log_line2)
    def check_mentions(self):
        """Checks mentions for sign up's via email or twitter
           via "Sign up / Sign up [email]"""

        try:
            mentions = self.api.mentions_timeline(count=3)

            for mention in mentions:
                if "stop" in mention.text.lower():
                    # Unsubscribe for email
                    if len(mention.text.split()) == 3:
                        email = mention.text.split()[2]
                        email_list = utils.open_file(EMAILS).split()

                        if email in email_list:
                            email_list.remove(email)
                            utils.write_to_file(EMAILS, ' '.join(email_list))

                    # Unsubscribe for Twitter handle
                    else:
                        twitter_name = mention.user.screen_name
                        twitter_name_list = utils.open_file(
                            TWITTER_NAMES).split()

                        if twitter_name in twitter_name_list:
                            twitter_name_list.remove(twitter_name)
                            utils.write_to_file(TWITTER_NAMES,
                                                ' '.join(twitter_name_list))

                elif "sign up" in mention.text.lower():
                    # Email sign up
                    if len(mention.text.split()) > 3:
                        email = mention.text.split()[3]
                        email_list = utils.open_file(EMAILS).split()

                        if email not in email_list:
                            email_list.append(email)
                            utils.append_to_file(EMAILS, email)

                    # Twitter handle sign up
                    else:
                        twitter_name = mention.user.screen_name
                        twitter_name_list = utils.open_file(
                            TWITTER_NAMES).split()

                        if twitter_name not in twitter_name_list:
                            twitter_name_list.append(twitter_name)
                            utils.append_to_file(TWITTER_NAMES, twitter_name)

        except tweepy.TweepError as error:
            utils.write_to_log(f'Error checking mentions: {error}')
Example #6
def track_directories(directories, session, bucket_name):

    global aws_session, bucket
    aws_session = session
    bucket = bucket_name

    observer = Observer()

    # Create event handler and set the function to call when event occurs.
    event_handler = FileSystemEventHandler()
    event_handler.on_created = upload_file_to_S3


    # Schedule the observer to monitor every directory in the config file.
    for directory in directories:
        observer.schedule(event_handler, directory, recursive=True)
        write_to_log('Scheduled observer for ' + directory)

    # Start the observer.
    observer.start()

    try:

        write_to_log('Beginning to wait for events.')

        # Constantly wait for events.
        while True:
            time.sleep(1)

    # Stop when user presses Ctrl + C.
    except KeyboardInterrupt:
        write_to_log('Stopping observers...')
        observer.stop()
        observer.join()
        write_to_log('Stopped observers.')
Example #7
def upload_file_to_S3(event):

    global observer_paused, aws_session, bucket

    # Don't respond to events if paused flag is true.
    # Make sure to only upload files and ignore directories.
    if observer_paused or not os.path.isfile(event.src_path):
        return

    # Unify folder names. (username replaced with placeholder)
    s3_key = event.src_path.replace(getpass.getuser(), core.user_placeholder).replace('\\', '/')

    s3_client = aws_session.resource('s3')

    s3_client.meta.client.upload_file(event.src_path, bucket, s3_key)

    write_to_log('Uploaded file ' + s3_key)
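
The dirtracker snippets above (and Example #19 below) call write_to_log with a single message string; the helper itself is not shown. A minimal sketch consistent with those call sites, where the log path and timestamp format are assumptions:

import datetime

LOG_PATH = 'dirtracker.log'  # assumed path; not shown in these examples

def write_to_log(message):
    """Append a timestamped message line to the log file."""
    stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open(LOG_PATH, 'a') as log_file:
        log_file.write('[{}] {}\n'.format(stamp, message))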
def main_code(raster_dir, pts, csv_path, log, raster_regex):
    # Main stuff
    start_all = timer()
    u.write_to_log('\nSTART POINTS: {}'.format(time.strftime("%Y-%m-%d  %H:%M:%S")), log)
    raster_list = []
    for filename in glob.iglob(raster_dir+raster_regex, recursive=True):
        raster_list.append(filename)
        
    count = 0
    for tif in raster_list:
        count+=1
        try:
            do_point_query(pts, tif, csv_path, count, log)
        except Exception as e:
            print(e)

    u.write_to_log('FINISH POINTS: {}\nELAPSED TIME: ({} sec.)'.format(time.strftime("%Y-%m-%d  %H:%M:%S"), round(timer()-start_all, 2)), log)
Example #9
def test_write_to_log_func():
    """Test write_to_log function from utils"""
    email = '*****@*****.**'
    event = 'TEST'
    today = datetime.date.today().strftime('%d-%m-%Y')
    record = email + ', ' + event
    log_str = '[' + today + '] -- ' + record + '\n'
    write_to_log(email, event)
    with open(LOG_FILE) as log_file:
        assert log_str in log_file.read()

    # clean log from test records
    with open(LOG_FILE, 'r') as log_file:
        lines = log_file.readlines()
    with open(LOG_FILE, 'w') as log_file:
        for line in lines:
            if line != log_str:
                log_file.write(line)
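
The test above pins down the record format: each entry is '[DD-MM-YYYY] -- <email>, <event>' on its own line. A write_to_log implementation that would satisfy the assertion is sketched below; only the format comes from the test, and the LOG_FILE constant is assumed to live in the same module:

import datetime

LOG_FILE = 'log.txt'  # assumed; the test imports LOG_FILE from the module under test

def write_to_log(email, event):
    """Append a '[DD-MM-YYYY] -- email, event' record to LOG_FILE."""
    today = datetime.date.today().strftime('%d-%m-%Y')
    with open(LOG_FILE, 'a') as log_file:
        log_file.write('[{}] -- {}, {}\n'.format(today, email, event))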
    def email(self):
        """Emails a list of people"""

        company_output = ', '.join(self.matches)

        try:
            email_list = utils.open_file(EMAILS).split()

            server = smtplib.SMTP('smtp.gmail.com', 587)
            server.starttls()
            server.login(EMAIL, PASSWORD)
            server.sendmail(
                EMAIL, email_list,
                f'Trump just tweeted about {company_output}. '
                f'Might be time to check your shares...')
            server.quit()

        except smtplib.SMTPResponseException as error:
            utils.write_to_log(f'Email error: {error}')
    def share_output():
        """Calls difference_in_shares from the Companies class,
           Outputs the data to twitter."""

        share_difference_dict = Companies().difference_in_shares()

        for company, info in share_difference_dict.items():
            try:
                # If you're only following one person, this can be tidied up:
                # remove "[Mentioned by: {info["Handle"]}]" from the first line.
                Twitter().api.update_status(
                    f'{company} [Mentioned by: {info["Handle"]}] - '
                    f'Initial Share Price: {info["Initial"]} '
                    f'Current Share Price: {info["Current"]} '
                    f'Change: {info["Change"]} '
                    f'Max Change: {info["Max-change"]}')

            except tweepy.TweepError as error:
                utils.write_to_log(f'Twitter output Error: {error}')
    def tweet(self):
        """Tweets and DMs the company that has been mentioned"""

        try:
            for company in self.matches:
                self.twitter.api.update_status(
                    f'Welp, looks like a good time to take a look at your shares in {company.upper()}! '
                    f'{self.handle} just mentioned them!')

            twitter_users = utils.open_file(TWITTER_NAMES).split()
            companies = ', '.join(self.matches)

            for user in twitter_users:
                time.sleep(2)
                self.twitter.api.send_direct_message(
                    screen_name=user,
                    text=f'{self.handle} just tweeted about {companies.upper()}! '
                    f'Might be time to check your shares!')

        except tweepy.TweepError as error:
            utils.write_to_log(f'Error tweeting: {error}')
    def check_tweets(self):
        """Checks the twitter handle for new tweets.
           If there has been a new tweet, checks it in the Companies
           class to see if a company is contained in it"""

        try:
            new_tweet = self.api.user_timeline(screen_name=self.handle,
                                               count=1)

            for tweet in new_tweet:  # Need to find a fix for this loop
                old_tweet = utils.open_file(
                    f'{GENERIC}{self.handle}.txt').strip()

                if old_tweet != tweet.text:
                    utils.write_to_file(f'{GENERIC}{self.handle}.txt',
                                        tweet.text)
                    return tweet.text

        except tweepy.TweepError as error:
            utils.write_to_log(f'Error checking for new tweets: {error}')
Example #14
File: run.py  Project: murielll/adgo-app
def okta():
    """ View for user creation in Okta """
    result = list()
    data = request.get_data()
    users = users_list_gen(data)

    if not users:
        return 'No users to create!'

    for user in users:
        okta_user = {
            "profile": {
                "firstName": user["firstName"],
                "lastName": user["lastName"],
                "email": user["email"],
                "login": user["email"]
            },
            "credentials": {
                "password": {
                    "value": settings.OKTA_USER_PASS
                }
            },
        }
        if settings.OKTA_USER_GROUP:
            okta_user.update({"groupIds": [
                settings.OKTA_USER_GROUP,
            ]})

        okta_req = post(settings.OKTA_API_URL,
                        headers=settings.OKTA_HEADERS,
                        data=json.dumps(okta_user))

        if okta_req.status_code != 200:
            result.append('%s: %s' %
                          (user['email'],
                           okta_req.json()['errorCauses'][0]['errorSummary']))

        else:
            result.append('%s: User successfully created.' % user['email'])
            write_to_log(user['email'], 'OKTA')
    return '\n'.join(result)
Example #15
    def write_request_to_log(self, dev, rft):
        '''Records the subvolume requested of this sample in a log'''

        if self.log is not None:
            rft_string = utils.rft_to_string(rft)

            log_line1 = self.sec_name
            log_line2 = "subvolume: [{},{},{}] requested".format(
                dev[0], dev[1], dev[2])
            log_line3 = "transformation: {}".format(rft_string)

            utils.write_to_log(self.log, log_line1)
            utils.write_to_log(self.log, log_line2)
            utils.write_to_log(self.log, log_line3)
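
The ZNN snippets in Examples #1, #4 and #15 pass the log handle first: utils.write_to_log(self.log, line). Assuming the handle is a file path (Examples #20 and #21 pass a logfile name produced by make_logfile_name), a minimal sketch of that two-argument helper:

def write_to_log(log, line):
    """Append a single line to the log file at path `log`."""
    with open(log, 'a') as f:
        f.write(line + '\n')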
Example #17
File: run.py  Project: murielll/adgo-app
def active_directory():
    """
    View for user creation in Active Directory

    Useful info about python ldap lib
    http://marcitland.blogspot.com/2011/02/python-active-directory-linux.html

    """

    result = list()
    data = request.get_data()
    users = users_list_gen(data)

    if not users:
        return 'No users to create!'

    ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)

    ad_srv = ldap.initialize(settings.AD_SERVER)

    ad_srv.set_option(ldap.OPT_REFERRALS, 0)
    ad_srv.set_option(ldap.OPT_PROTOCOL_VERSION, 3)
    ad_srv.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND)
    ad_srv.set_option(ldap.OPT_X_TLS_DEMAND, True)
    ad_srv.set_option(ldap.OPT_DEBUG_LEVEL, 255)

    try:
        ad_srv.start_tls_s()
    except ldap.LDAPError as err:
        msg = get_err_msg(err)
        result.append('Failed to connect to LDAP server! Error message: %s.' %
                      msg)
        return '\n'.join(result)

    ad_srv.simple_bind_s(settings.AD_BIND_DN, settings.AD_ADMIN_PASS)

    for user in users:

        user_dn = 'CN=' + user['fullName'] + ',' + settings.AD_USER_DN

        user_attrs = {}
        user_attrs['objectClass'] = [
            b'top', b'person', b'organizationalPerson', b'user'
        ]
        user_attrs['cn'] = str.encode(user['fullName'])
        user_attrs['userPrincipalName'] = str.encode(user['adName'] + '@' +
                                                     settings.AD_DOMAIN)
        user_attrs['sAMAccountName'] = str.encode(user['adName'])
        user_attrs['givenName'] = str.encode(user['firstName'])
        user_attrs['sn'] = str.encode(user['lastName'])
        user_attrs['displayName'] = str.encode(user['fullName'])
        user_attrs['description'] = str.encode(user['jobtitle'])
        user_attrs['userAccountControl'] = b'514'  # user is disabled

        # add user
        user_ldif = modlist.addModlist(user_attrs)
        try:
            ad_srv.add_s(user_dn, user_ldif)
            result.append('%s: User successfully created.' % user['adName'])
            write_to_log(user['email'], 'AD')
        except ldap.LDAPError as err:
            msg = get_err_msg(err)
            result.append('%s: Failed to create user! Error message: %s' %
                          (user['adName'], msg))
            return '\n'.join(result)

        # change pass
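        # AD's unicodePwd attribute must be the new password wrapped in double quotes, encoded UTF-16-LE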
        unicode_pass = '******' + settings.AD_USER_PASS + '\"'
        password_value = unicode_pass.encode('utf-16-le')
        add_pass = [(ldap.MOD_REPLACE, 'unicodePwd', [password_value])]
        try:
            ad_srv.modify_s(user_dn, add_pass)
            result.append('%s: Password changed successfully.' %
                          user['adName'])
        except ldap.LDAPError as err:
            msg = get_err_msg(err)
            result.append('%s: Failed to change password! Error message: %s.' %
                          (user['adName'], msg))
            return '\n'.join(result)

        # enable user
        mod_acct = [(ldap.MOD_REPLACE, 'userAccountControl', b'512')]
        try:
            ad_srv.modify_s(user_dn, mod_acct)
            result.append('%s: User enabled successfully.' % user['adName'])
        except ldap.LDAPError as err:
            msg = get_err_msg(err)
            result.append('%s: Failed to enable user! Error message: %s.' %
                          (user['adName'], msg))
            return '\n'.join(result)
        if settings.AD_GROUP_NAME:
            # add to group
            add_member = [(ldap.MOD_ADD, 'member', str.encode(user_dn))]

            try:
                ad_srv.modify_s(settings.AD_GROUP_DN, add_member)
                result.append('%s: User successfully added to group %s.' %
                              (user['adName'], settings.AD_GROUP_NAME))
            except ldap.LDAPError as err:
                msg = get_err_msg(err)
                result.append(
                    '%s: Failed to add user to group %s! Error message: %s.' %
                    (user['adName'], settings.AD_GROUP_NAME, msg))

    # Disconnect from AD server
    ad_srv.unbind_s()

    return '\n'.join(result)
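
get_err_msg is called on every ldap.LDAPError above but is not shown. python-ldap exceptions usually carry a dict with a 'desc' key as their first argument, so a plausible sketch is:

def get_err_msg(err):
    """Extract a readable message from an ldap.LDAPError."""
    if err.args and isinstance(err.args[0], dict):
        return err.args[0].get('desc', str(err))
    return str(err)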
Example #18
def main_code(buff_dir, pt_dir, combine_dir, log):
    u.verify_dir(combine_dir)
    start_all = timer()
    u.write_to_log(
        '\nSTART COMBINE: {}'.format(time.strftime("%Y-%m-%d  %H:%M:%S")), log)
    buff_list = []
    for filename in glob.iglob(buff_dir + '/*.csv'):
        buff_list.append(filename)

    pt_list = []
    for filename in glob.iglob(pt_dir + '/*.csv'):
        pt_list.append(filename)

    match_count = 0
    for buff_csv in buff_list:
        buff_name = os.path.basename(buff_csv)
        process_buff = buff_csv
        for pt_csv in pt_list:
            pt_name = os.path.basename(pt_csv)
            if buff_name == pt_name:
                process_pt = pt_csv
                match_count += 1
                u.write_to_log(
                    '  {}) buffers: {}   points: {}'.format(
                        match_count, buff_name, pt_name), log)

                #csv_name = os.path.basename(os.path.basename(buff_csv).replace('.csv', '_pandas.csv'))
                out_csv = os.path.join(combine_dir, buff_name)

                buff_df = pd.read_csv(buff_csv, dtype='object')
                pt_df = pd.read_csv(pt_csv, dtype='object')

                print('    buff shape: {}'.format(buff_df.shape))
                print('    pt shape:   {}'.format(pt_df.shape))

                merged = pd.merge(left=buff_df, right=pt_df, on='DHSID')
                print('    merge shape: {}'.format(merged.shape))

                for col in list(merged):
                    if col.endswith("_y"):
                        merged = merged.drop(columns=col)

                # These select columns by name and merge point values into them if
                # the buffer analysis resulted in a blank value.
                try:
                    merged.loc[merged['mean'].isnull(),
                               'mean'] = merged['value']
                except KeyError:
                    print('      no MEAN column')

                try:
                    merged.loc[merged['sum'].isnull(), 'sum'] = merged['value']
                except KeyError:
                    print('      no SUM column')

                try:
                    merged.loc[merged['majority'].isnull(),
                               'majority'] = merged['value']
                except KeyError:
                    print('      no MAJORITY column')

                try:
                    merged = merged.drop(columns='nodata')
                except KeyError:
                    print('      no NODATA column')
                merged = merged.drop(columns='value')

                merged.to_csv(out_csv, sep=',')

    u.write_to_log(
        'FINISH COMBINE: {}\nELAPSED TIME: {} sec.'.format(
            time.strftime("%Y-%m-%d  %H:%M:%S"), round(timer() - start_all,
                                                       3)), log)
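
Both raster scripts call u.verify_dir before writing output. A minimal sketch of that helper, assuming it simply creates missing directories:

import os

def verify_dir(path):
    """Create the directory if it does not already exist."""
    if not os.path.isdir(path):
        os.makedirs(path)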
Example #19
import dirtracker
import syncmonitor
import utils
import threading

user_placeholder = 'USER'

#---------------------------------------------------------
# Run order
#---------------------------------------------------------
if __name__ == '__main__':

    config = utils.get_config()
    credentials = utils.get_credentials(config)
    aws_session = utils.get_session(credentials, config)

    # Start the sync monitor in a separate daemon thread.
    sync = threading.Thread(target=syncmonitor.sync_monitor, args=(aws_session, config), daemon=True)
    sync.start()
    utils.write_to_log('Started sync monitor.')

    # Start monitoring for new files in directories using main thread.
    utils.write_to_log('Started directory trackers.')
    dirtracker.track_directories(config['folders'], aws_session, config['bucket'])
Example #20
def main( conf_file='config.cfg', logfile=None ):
    #%% parameters
    print "reading config parameters..."
    config, pars = zconfig.parser( conf_file )

    if pars.has_key('logging') and pars['logging']:
        print "recording configuration file..."
        zlog.record_config_file( pars )

        logfile = zlog.make_logfile_name( pars )

    #%% create and initialize the network
    if pars['train_load_net'] and os.path.exists(pars['train_load_net']):
        print "loading network..."
        net = znetio.load_network( pars )
        # load existing learning curve
        lc = zstatistics.CLearnCurve( pars['train_load_net'] )
        # the last iteration we want to continue training
        iter_last = lc.get_last_it()
    else:
        if pars['train_seed_net'] and os.path.exists(pars['train_seed_net']):
            print "seeding network..."
            net = znetio.load_network( pars, is_seed=True )
        else:
            print "initializing network..."
            net = znetio.init_network( pars )
        # initialize a learning curve
        lc = zstatistics.CLearnCurve()
        iter_last = lc.get_last_it()

    # show field of view
    print "field of view: ", net.get_fov()

    # total voxel number of output volumes
    vn = utils.get_total_num(net.get_outputs_setsz())

    # set some parameters
    print 'setting up the network...'
    eta = pars['eta']
    net.set_eta( pars['eta'] )
    net.set_momentum( pars['momentum'] )
    net.set_weight_decay( pars['weight_decay'] )

    # initialize samples
    outsz = pars['train_outsz']
    print "\n\ncreate train samples..."
    smp_trn = zsample.CSamples(config, pars, pars['train_range'], net, outsz, logfile)
    print "\n\ncreate test samples..."
    smp_tst = zsample.CSamples(config, pars, pars['test_range'],  net, outsz, logfile)

    # initialization
    elapsed = 0
    err = 0.0 # cost energy
    cls = 0.0 # pixel classification error
    re = 0.0  # rand error
    # number of voxels which accumulate error
    # (if a mask exists)
    num_mask_voxels = 0

    if pars['is_malis']:
        malis_cls = 0.0

    print "start training..."
    start = time.time()
    total_time = 0.0
    print "start from ", iter_last+1

    #Saving initialized network
    if iter_last+1 == 1:
        znetio.save_network(net, pars['train_save_net'], num_iters=0)
        lc.save( pars, 0.0 )

    for i in xrange(iter_last+1, pars['Max_iter']+1):
        # get random sub volume from sample
        vol_ins, lbl_outs, msks, wmsks = smp_trn.get_random_sample()

        # forward pass
        # apply the transformations in memory rather than array view
        vol_ins = utils.make_continuous(vol_ins, dtype=pars['dtype'])
        props = net.forward( vol_ins )

        # cost function and accumulate errors
        props, cerr, grdts = pars['cost_fn']( props, lbl_outs, msks )
        err += cerr
        cls += cost_fn.get_cls(props, lbl_outs)
        num_mask_voxels += utils.sum_over_dict(msks)

        # gradient reweighting
        grdts = utils.dict_mul( grdts, msks  )
        grdts = utils.dict_mul( grdts, wmsks )

        if pars['is_malis'] :
            malis_weights, rand_errors = cost_fn.malis_weight(pars, props, lbl_outs)
            grdts = utils.dict_mul(grdts, malis_weights)
            # accumulate the rand error
            re += rand_errors.values()[0]
            malis_cls_dict = utils.get_malis_cls( props, lbl_outs, malis_weights )
            malis_cls += malis_cls_dict.values()[0]


        total_time += time.time() - start
        start = time.time()

        # test the net
        if i%pars['Num_iter_per_test']==0:
            lc = test.znn_test(net, pars, smp_tst, vn, i, lc)

        if i%pars['Num_iter_per_show']==0:
            # normalize
            if utils.dict_mask_empty(msks):
                err = err / vn / pars['Num_iter_per_show']
                cls = cls / vn / pars['Num_iter_per_show']
            else:
                err = err / num_mask_voxels / pars['Num_iter_per_show']
                cls = cls / num_mask_voxels / pars['Num_iter_per_show']

            lc.append_train(i, err, cls)

            # time
            elapsed = total_time / pars['Num_iter_per_show']

            if pars['is_malis']:
                re = re / pars['Num_iter_per_show']
                lc.append_train_rand_error( re )
                malis_cls = malis_cls / pars['Num_iter_per_show']
                lc.append_train_malis_cls( malis_cls )

                show_string = "iteration %d,    err: %.3f, cls: %.3f, re: %.6f, mc: %.3f, elapsed: %.1f s/iter, learning rate: %.6f"\
                              %(i, err, cls, re, malis_cls, elapsed, eta )
            else:
                show_string = "iteration %d,    err: %.3f, cls: %.3f, elapsed: %.1f s/iter, learning rate: %.6f"\
                    %(i, err, cls, elapsed, eta )

            if pars.has_key('logging') and pars['logging']:
                utils.write_to_log(logfile, show_string)
            print show_string

            # reset err and cls
            err = 0
            cls = 0
            re = 0
            num_mask_voxels = 0

            if pars['is_malis']:
                malis_cls = 0

            # reset time
            total_time  = 0
            start = time.time()

        if i%pars['Num_iter_per_annealing']==0:
            # anneal factor
            eta = eta * pars['anneal_factor']
            net.set_eta(eta)

        if i%pars['Num_iter_per_save']==0:
            # save network
            znetio.save_network(net, pars['train_save_net'], num_iters=i)
            lc.save( pars, elapsed )
            if pars['is_malis']:
                utils.save_malis(malis_weights,  pars['train_save_net'], num_iters=i)

        # run backward pass
        grdts = utils.make_continuous(grdts, dtype=pars['dtype'])
        net.backward( grdts )
Example #21
def main(args):
    config, pars, logfile = parse_args(args)
    #%% create and initialize the network
    net, lc = znetio.create_net(pars)

    # total voxel number of output volumes
    vn = utils.get_total_num(net.get_outputs_setsz())

    # initialize samples
    outsz = pars['train_outsz']
    print "\n\ncreate train samples..."
    smp_trn = zsample.CSamples(config, pars, pars['train_range'], net, outsz,
                               logfile)
    print "\n\ncreate test samples..."
    smp_tst = zsample.CSamples(config, pars, pars['test_range'], net, outsz,
                               logfile)

    if pars['is_check']:
        import zcheck
        zcheck.check_patch(pars, smp_trn)
        # gradient check is not working now.
        # zcheck.check_gradient(pars, net, smp_trn)

    # initialization
    eta = pars['eta']
    elapsed = 0
    err = 0.0  # cost energy
    cls = 0.0  # pixel classification error
    re = 0.0  # rand error
    # number of voxels which accumulate error
    # (if a mask exists)
    num_mask_voxels = 0

    if pars['is_malis']:
        malis_cls = 0.0
        malis_eng = 0.0
    else:
        malis_weights = None

    # the last iteration we want to continue training
    iter_last = lc.get_last_it()

    print "start training..."
    start = time.time()
    total_time = 0.0
    print "start from ", iter_last + 1

    #Saving initial/seeded network
    # get file name
    fname, fname_current = znetio.get_net_fname(pars['train_net_prefix'],
                                                iter_last,
                                                suffix="init")
    znetio.save_network(net, fname, pars['is_stdio'])
    lc.save(pars, fname, elapsed=0.0, suffix="init_iter{}".format(iter_last))
    # no nan detected
    nonan = True

    for i in xrange(iter_last + 1, pars['Max_iter'] + 1):
        # time cumulation
        total_time += time.time() - start
        start = time.time()

        # get random sub volume from sample
        vol_ins, lbl_outs, msks, wmsks = smp_trn.get_random_sample()

        # forward pass
        # apply the transformations in memory rather than array view
        vol_ins = utils.make_continuous(vol_ins)
        props = net.forward(vol_ins)

        # cost function and accumulate errors
        props, cerr, grdts = pars['cost_fn'](props, lbl_outs, msks)
        err += cerr
        cls += cost_fn.get_cls(props, lbl_outs)
        # compute rand error
        if pars['is_debug']:
            assert not np.all(lbl_outs.values()[0] == 0)
        re += pyznn.get_rand_error(props.values()[0], lbl_outs.values()[0])
        num_mask_voxels += utils.sum_over_dict(msks)

        # check whether there is a NaN here!
        if pars['is_debug']:
            nonan = nonan and utils.check_dict_nan(vol_ins)
            nonan = nonan and utils.check_dict_nan(lbl_outs)
            nonan = nonan and utils.check_dict_nan(msks)
            nonan = nonan and utils.check_dict_nan(wmsks)
            nonan = nonan and utils.check_dict_nan(props)
            nonan = nonan and utils.check_dict_nan(grdts)
            if not nonan:
                utils.inter_save(pars, net, lc, vol_ins, props, lbl_outs, \
                             grdts, malis_weights, wmsks, elapsed, i)
                # stop training
                return

        # gradient reweighting
        grdts = utils.dict_mul(grdts, msks)
        if pars['rebalance_mode']:
            grdts = utils.dict_mul(grdts, wmsks)

        if pars['is_malis']:
            malis_weights, rand_errors, num_non_bdr = cost_fn.malis_weight(
                pars, props, lbl_outs)
            if num_non_bdr <= 1:
                # skip this iteration
                continue
            grdts = utils.dict_mul(grdts, malis_weights)
            dmc, dme = utils.get_malis_cost(props, lbl_outs, malis_weights)
            malis_cls += dmc.values()[0]
            malis_eng += dme.values()[0]

        # run backward pass
        grdts = utils.make_continuous(grdts)
        net.backward(grdts)

        total_time += time.time() - start
        start = time.time()

        if i % pars['Num_iter_per_show'] == 0:
            # time
            elapsed = total_time / pars['Num_iter_per_show']

            # normalize
            if utils.dict_mask_empty(msks):
                err = err / vn / pars['Num_iter_per_show']
                cls = cls / vn / pars['Num_iter_per_show']
            else:
                err = err / num_mask_voxels / pars['Num_iter_per_show']
                cls = cls / num_mask_voxels / pars['Num_iter_per_show']
            re = re / pars['Num_iter_per_show']
            lc.append_train(i, err, cls, re)

            if pars['is_malis']:
                malis_cls = malis_cls / pars['Num_iter_per_show']
                malis_eng = malis_eng / pars['Num_iter_per_show']
                lc.append_train_malis_cls(malis_cls)
                lc.append_train_malis_eng(malis_eng)

                show_string = "update %d,    cost: %.3f, pixel error: %.3f, rand error: %.3f, me: %.3f, mc: %.3f, elapsed: %.1f s/iter, learning rate: %.5f"\
                              %(i, err, cls, re, malis_eng, malis_cls, elapsed, eta )
            else:
                show_string = "update %d,    cost: %.3f, pixel error: %.3f, rand error: %.3f, elapsed: %.1f s/iter, learning rate: %.5f"\
                    %(i, err, cls, re, elapsed, eta )

            if pars.has_key('logging') and pars['logging']:
                utils.write_to_log(logfile, show_string)
            print show_string

            # reset err and cls
            err = 0
            cls = 0
            re = 0
            num_mask_voxels = 0

            if pars['is_malis']:
                malis_cls = 0

            # reset time
            total_time = 0
            start = time.time()

        # test the net
        if i % pars['Num_iter_per_test'] == 0:
            # time accumulation should skip the test
            total_time += time.time() - start
            lc = test.znn_test(net, pars, smp_tst, vn, i, lc)
            start = time.time()

        if i % pars['Num_iter_per_save'] == 0:
            utils.inter_save(pars, net, lc, vol_ins, props, lbl_outs, \
                             grdts, malis_weights, wmsks, elapsed, i)

        if i % pars['Num_iter_per_annealing'] == 0:
            # anneal factor
            eta = eta * pars['anneal_factor']
            net.set_eta(eta)

        # stop the iteration at checking mode
        if pars['is_check']:
            print "only need one iteration for checking, stop program..."
            break
Example #22
File: run.py  Project: murielll/adgo-app
def gmail():
    """ View for user creation in Gmail """
    result = list()
    data = request.get_data()
    users = users_list_gen(data)

    if not users:
        return 'No users to create!'

    scopes = [
        'https://www.googleapis.com/auth/admin.directory.user',
        'https://www.googleapis.com/auth/admin.directory.group'
    ]
    creds = None

    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)

    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(
                settings.GMAIL_CREDENTIALS, scopes)
            creds = flow.run_local_server(port=0)

        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)

    service = build('admin', 'directory_v1', credentials=creds)

    for user in users:
        gmail_user = {
            'primaryEmail': user['email'],
            'name': {
                'givenName': user['firstName'],
                'familyName': user['lastName']
            },
            'orgUnitPath': settings.GMAIL_ORG_UNIT_PATH,
            'password': settings.GMAIL_USER_PASS
        }

        member = {'email': user['email']}
        # add user
        try:
            service.users().insert(body=gmail_user).execute()
            result.append('%s: User successfully created.' % user['email'])
            write_to_log(user['email'], 'GMAIL')
        except HttpError as err:
            msg = json.loads(err.content.decode())['error']['message']
            result.append('%s: Failed to create user. Error message: %s' %
                          (user['email'], msg))
            return '\n'.join(result)
        # add user to group
        if settings.GMAIL_GROUP:
            try:
                service.members().insert(groupKey=settings.GMAIL_GROUP,
                                         body=member).execute()
                result.append('%s: User successfully added to group %s.' %
                              (user['email'], settings.GMAIL_GROUP))
            except HttpError as err:
                msg = json.loads(err.content.decode())['error']['message']
                result.append(
                    '%s: Failed to add user to group %s. Error message: %s.' %
                    (user['email'], settings.GMAIL_GROUP, msg))

    return '\n'.join(result)
Example #23
def main( conf_file='config.cfg', logfile=None ):
    #%% parameters
    print "reading config parameters..."
    config, pars = front_end.parser( conf_file )

    if pars.has_key('logging') and pars['logging']:
        print "recording configuration file..."
        front_end.record_config_file( pars )

        logfile = front_end.make_logfile_name( pars )

    #%% create and initialize the network
    if pars['train_load_net'] and os.path.exists(pars['train_load_net']):
        print "loading network..."
        net = netio.load_network( pars )
        # load existing learning curve
        lc = zstatistics.CLearnCurve( pars['train_load_net'] )
    else:
        if pars['train_seed_net'] and os.path.exists(pars['train_seed_net']):
            print "seeding network..."
            net = netio.load_network( pars, is_seed=True )
        else:
            print "initializing network..."
            net = netio.init_network( pars )
        # initialize a learning curve
        lc = zstatistics.CLearnCurve()

    # show field of view
    print "field of view: ", net.get_fov()
    print "output volume info: ", net.get_outputs_setsz()

    # set some parameters
    print 'setting up the network...'
    vn = utils.get_total_num(net.get_outputs_setsz())
    eta = pars['eta'] #/ vn
    net.set_eta( eta )
    net.set_momentum( pars['momentum'] )
    net.set_weight_decay( pars['weight_decay'] )

    # initialize samples
    outsz = pars['train_outsz']
    print "\n\ncreate train samples..."
    smp_trn = front_end.CSamples(config, pars, pars['train_range'], net, outsz, logfile)
    print "\n\ncreate test samples..."
    smp_tst = front_end.CSamples(config, pars, pars['test_range'],  net, outsz, logfile)

    # initialization
    elapsed = 0
    err = 0
    cls = 0

    # interactive visualization
    plt.ion()
    plt.show()

    # the last iteration we want to continue training
    iter_last = lc.get_last_it()


    print "start training..."
    start = time.time()
    print "start from ", iter_last+1
    for i in xrange(iter_last+1, pars['Max_iter']+1):
        vol_ins, lbl_outs, msks = smp_trn.get_random_sample()

        # forward pass
        vol_ins = utils.make_continuous(vol_ins, dtype=pars['dtype'])

        props = net.forward( vol_ins )

        # cost function and accumulate errors
        props, cerr, grdts = pars['cost_fn']( props, lbl_outs )
        err = err + cerr
        cls = cls + cost_fn.get_cls(props, lbl_outs)

        # mask process the gradient
        grdts = utils.dict_mul(grdts, msks)

        # run backward pass
        grdts = utils.make_continuous(grdts, dtype=pars['dtype'])
        net.backward( grdts )

        if pars['is_malis'] :
            malis_weights = cost_fn.malis_weight(props, lbl_outs)
            grdts = utils.dict_mul(grdts, malis_weights)

        if i%pars['Num_iter_per_test']==0:
            # test the net
            lc = test.znn_test(net, pars, smp_tst, vn, i, lc)

        if i%pars['Num_iter_per_show']==0:
            # anneal factor
            eta = eta * pars['anneal_factor']
            net.set_eta(eta)
            # normalize
            err = err / vn / pars['Num_iter_per_show']
            cls = cls / vn / pars['Num_iter_per_show']
            lc.append_train(i, err, cls)

            # time
            elapsed = time.time() - start
            elapsed = elapsed / pars['Num_iter_per_show']

            show_string = "iteration %d,    err: %.3f,    cls: %.3f,   elapsed: %.1f s/iter, learning rate: %.6f"\
                    %(i, err, cls, elapsed, eta )

            if pars.has_key('logging') and pars['logging']:
                utils.write_to_log(logfile, show_string)
            print show_string

            if pars['is_visual']:
                # show results To-do: run in a separate thread
                front_end.inter_show(start, lc, eta, vol_ins, props, lbl_outs, grdts, pars)
                if pars['is_rebalance'] and 'aff' not in pars['out_type']:
                    plt.subplot(247)
                    plt.imshow(msks.values()[0][0,0,:,:], interpolation='nearest', cmap='gray')
                    plt.xlabel('rebalance weight')
                if pars['is_malis']:
                    plt.subplot(248)
                    plt.imshow(malis_weights.values()[0][0,0,:,:], interpolation='nearest', cmap='gray')
                    plt.xlabel('malis weight (log)')
                plt.pause(2)
                plt.show()
            # reset err and cls
            err = 0
            cls = 0
            # reset time
            start = time.time()

        if i%pars['Num_iter_per_save']==0:
            # save network
            netio.save_network(net, pars['train_save_net'], num_iters=i)
            lc.save( pars, elapsed )
def main(conf_file='config.cfg', logfile=None):
    #%% parameters
    print "reading config parameters..."
    config, pars = front_end.parser(conf_file)

    if pars.has_key('logging') and pars['logging']:
        print "recording configuration file..."
        front_end.record_config_file(pars)

        logfile = front_end.make_logfile_name(pars)

    #%% create and initialize the network
    if pars['train_load_net'] and os.path.exists(pars['train_load_net']):
        print "loading network..."
        net = netio.load_network(pars)
        # load existing learning curve
        lc = zstatistics.CLearnCurve(pars['train_load_net'])
    else:
        if pars['train_seed_net'] and os.path.exists(pars['train_seed_net']):
            print "seeding network..."
            net = netio.seed_network(pars, is_seed=True)
        else:
            print "initializing network..."
            net = netio.init_network(pars)
        # initialize a learning curve
        lc = zstatistics.CLearnCurve()

    # show field of view
    print "field of view: ", net.get_fov()
    print "output volume info: ", net.get_outputs_setsz()

    # set some parameters
    print 'setting up the network...'
    vn = utils.get_total_num(net.get_outputs_setsz())
    eta = pars['eta']  #/ vn
    net.set_eta(eta)
    net.set_momentum(pars['momentum'])
    net.set_weight_decay(pars['weight_decay'])

    # initialize samples
    outsz = pars['train_outsz']
    print "\n\ncreate train samples..."
    smp_trn = front_end.CSamples(config, pars, pars['train_range'], net, outsz,
                                 logfile)
    print "\n\ncreate test samples..."
    smp_tst = front_end.CSamples(config, pars, pars['test_range'], net, outsz,
                                 logfile)

    # initialization
    elapsed = 0
    err = 0
    cls = 0

    # interactive visualization
    plt.ion()
    plt.show()

    # the last iteration we want to continue training
    iter_last = lc.get_last_it()

    print "start training..."
    start = time.time()
    print "start from ", iter_last + 1
    for i in xrange(iter_last + 1, pars['Max_iter'] + 1):
        vol_ins, lbl_outs, msks = smp_trn.get_random_sample()

        # forward pass
        vol_ins = utils.make_continuous(vol_ins, dtype=pars['dtype'])

        props = net.forward(vol_ins)

        # cost function and accumulate errors
        props, cerr, grdts = pars['cost_fn'](props, lbl_outs)
        err = err + cerr
        cls = cls + cost_fn.get_cls(props, lbl_outs)

        # mask process the gradient
        grdts = utils.dict_mul(grdts, msks)

        # run backward pass
        grdts = utils.make_continuous(grdts, dtype=pars['dtype'])
        net.backward(grdts)

        if pars['is_malis']:
            malis_weights = cost_fn.malis_weight(props, lbl_outs)
            grdts = utils.dict_mul(grdts, malis_weights)

        if i % pars['Num_iter_per_test'] == 0:
            # test the net
            lc = test.znn_test(net, pars, smp_tst, vn, i, lc)

        if i % pars['Num_iter_per_show'] == 0:
            # anneal factor
            eta = eta * pars['anneal_factor']
            net.set_eta(eta)
            # normalize
            err = err / vn / pars['Num_iter_per_show']
            cls = cls / vn / pars['Num_iter_per_show']
            lc.append_train(i, err, cls)

            # time
            elapsed = time.time() - start
            elapsed = elapsed / pars['Num_iter_per_show']

            show_string = "iteration %d,    err: %.3f,    cls: %.3f,   elapsed: %.1f s/iter, learning rate: %.6f"\
                    %(i, err, cls, elapsed, eta )

            if pars.has_key('logging') and pars['logging']:
                utils.write_to_log(logfile, show_string)
            print show_string

            if pars['is_visual']:
                # show results To-do: run in a separate thread
                front_end.inter_show(start, lc, eta, vol_ins, props, lbl_outs,
                                     grdts, pars)
                if pars['is_rebalance'] and 'aff' not in pars['out_type']:
                    plt.subplot(247)
                    plt.imshow(msks.values()[0][0, 0, :, :],
                               interpolation='nearest',
                               cmap='gray')
                    plt.xlabel('rebalance weight')
                if pars['is_malis']:
                    plt.subplot(248)
                    plt.imshow(malis_weights.values()[0][0, 0, :, :],
                               interpolation='nearest',
                               cmap='gray')
                    plt.xlabel('malis weight (log)')
                plt.pause(2)
                plt.show()
            # reset err and cls
            err = 0
            cls = 0
            # reset time
            start = time.time()

        if i % pars['Num_iter_per_save'] == 0:
            # save network
            netio.save_network(net, pars['train_save_net'], num_iters=i)
            lc.save(pars, elapsed)