Example #1
def pull_request(app, pr_title, branch, base="staging", owner='frappe'):
    global config

    pr_body = config.get("pr_body")
    in_test = config.get("in_test")
    git_in_test = config.get("git_in_test")
    github_username = config.get("github_username")
    github_password = config.get("github_password")
    url = 'https://api.github.com/repos/{0}/{1}/pulls'.format(owner, app)

    args = {
        "body": pr_body,
        "title": pr_title,
        "head": "{github_username}:{branch}".format(
            github_username=github_username, branch=branch),
        "base": base,
    }

    if (not in_test) and (not git_in_test):
        r = requests.post(url,
                          auth=HTTPBasicAuth(github_username, github_password),
                          data=json.dumps(args))
        r.raise_for_status()

    print "created pull request for {0}".format(app)
Example #2
def GetROIBaselineModel(learningRateName='LEARNING_RATE',
                        stepCountName='NB_STEPS',
                        batchSizeName='BATCH_SIZE',
                        keepProbName='KEEP_PROB',
                        optimizer='ADAM',
                        firstHiddenLayerUnits=96,
                        secondHiddenLayerUnits=0):
    ############ DEFINE PLACEHOLDERS, LOSS ############
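    # NOTE: matricesPL, trainingPL and labelsPL are not defined in this
    # function; in the source project they are presumably module-level
    # placeholders (cf. MatrixPlaceholders() in Example #13 below).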
    predictionLayer = baselineROICNN(
        matricesPL,
        trainingPL,
        keepProbability=get('TRAIN.ROI_BASELINE.%s' % keepProbName),
        firstHiddenLayerUnits=firstHiddenLayerUnits,
        secondHiddenLayerUnits=secondHiddenLayerUnits)
    lossFunction = tf.losses.mean_squared_error(labels=labelsPL,
                                                predictions=predictionLayer)

    ############ DEFINE OPTIMIZER ############
    if optimizer == 'ADAM':
        trainOperation = AdamOptimizer(
            lossFunction, get('TRAIN.ROI_BASELINE.%s' % learningRateName))
    elif optimizer == 'GRAD_DECAY':
        trainOperation = ScheduledGradOptimizer(
            lossFunction,
            baseLearningRate=get('TRAIN.ROI_BASELINE.%s' % learningRateName))
    else:
        raise ValueError('Unrecognized optimizer: %s' % optimizer)

    ############ DEFINE LEARNING PARAMETERS ############
    stepCount = get('TRAIN.ROI_BASELINE.%s' % stepCountName)
    batchSize = get('TRAIN.ROI_BASELINE.%s' % batchSizeName)

    return predictionLayer, lossFunction, trainOperation, stepCount, batchSize
Example #3
def push(app, repo_path, branch, commit_msg, commit=True):
    from utils import config
    in_test = config.get("in_test")
    git_in_test = config.get("git_in_test")

    if not all([repo_path, branch, commit_msg]):
        raise Exception("Invalid arguments")

    if ("{app}" in commit_msg) and ("{date}" in commit_msg):
        commit_msg = commit_msg.format(app=app, date=now_date())

    repo = Repo(repo_path)
    if commit:
        repo.git.add('--all')
        repo.index.commit(commit_msg)

    args = [
        '{branch}:{branch}'.format(branch=branch),
    ]
    # push the changes
    if (not in_test) and (not git_in_test):
        repo.git.push('origin', *args)

    print "pushed the changes to repo with commit message\n{0}".format(
        commit_msg)
Example #4
def isAlertTime():
    alertFrom = datetime.strptime(config.get("alerttimewindow", "fromtime"),
                                  "%H:%M").time()
    alertTill = datetime.strptime(config.get("alerttimewindow", "totime"),
                                  "%H:%M").time()
    now = datetime.now().time()
    return now >= alertFrom and now < alertTill
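
Note that the comparison above assumes the alert window does not cross midnight. A minimal sketch (not from the original project) of the same check generalized to wrapping windows:

from datetime import time

def is_in_window(now, start, end):
    # half-open window [start, end); handles windows that wrap past midnight
    if start <= end:
        return start <= now < end
    return now >= start or now < end

# e.g. is_in_window(time(23, 30), time(22, 0), time(6, 0)) -> True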
Example #5
    def load_config(self):
        config.load()

        adsrconf = config.get('adsr')
        if adsrconf:
            for k, v in adsrconf.iteritems():
                setattr(self.adsrList, k, v)
            self.adsrw.adsr = self.adsrList

        sounds = config.get('sounds')
        if sounds:
            for snd in sounds['files']:
                self.add_sound(str(snd))

        globs = config.get('globals')
        if globs:
            self.invert_control = globs['invert_control']
            self.trigger_shadow = globs['trigger_shadow']
            if self.invert_control:
                self.invert.setCheckState(QtCore.Qt.Checked)
            if self.trigger_shadow:
                self.invert_slope.setCheckState(QtCore.Qt.Checked)

            self.sldVolumen.setValue(globs['default_volume'])

            for x in xrange(globs['sensor_count']):
                self.add_sensor()
Example #6
def getAlertMessage(read, eTrend, bIsConsistent, wind_change, change_time):
    str_verb = strings_dict[eTrend.name]["verb"]
    str_trend = strings_dict[eTrend.name]["label"] + (
        " " + strings_dict[bIsConsistent]["label"]
        if eTrend != WindTrend.STEADY else "")
    str_sub_message = str.format(
        "{verb} בכ {change}kn ", verb=str_verb,
        change=wind_change) if eTrend != WindTrend.STEADY else ""
    str_message = str.format(
        "{beach} - {trend}: {current}kn - {gust}kn {direction_name}-{direction}\n"
        "{sub_message}ב {change_time} דקות האחרונות\n"
        "זמן קריאה: {read_date} {read_time}\n"
        "מקור [{source}]({source_url})",
        beach=config.get(read[consts.WINDREADSFIELDS.INFO_SOURCE_NAME], "beachName"),
        current=read[consts.WINDREADSFIELDS.WIND_AVG],
        gust=read[consts.WINDREADSFIELDS.WIND_GUST],
        direction_name=read[consts.WINDREADSFIELDS.WIND_DIR_NAME],
        direction=read[consts.WINDREADSFIELDS.WIND_DIR],
        trend=str_trend,
        sub_message=str_sub_message,
        change_time=change_time,
        read_date=read[consts.WINDREADSFIELDS.INFO_DATE],
        read_time=read[consts.WINDREADSFIELDS.INFO_TIME],
        source=config.get(read[consts.WINDREADSFIELDS.INFO_SOURCE_NAME], "frindlyName"),
        source_url=read[consts.WINDREADSFIELDS.INFO_SOURCE_URL])
    return str_message
Example #7
def GetBaselineLSTMModel(timecoursePL,
                         learningRateName='LEARNING_RATE',
                         stepCountName='NB_STEPS',
                         batchSizeName='BATCH_SIZE',
                         optimizer='ADAM'):
    ############ DEFINE PLACEHOLDERS, LOSS ############
    predictionLayer = baselineLSTM(timecoursePL=timecoursePL)
    lossFunction = tf.losses.mean_squared_error(labels=labelsPL,
                                                predictions=predictionLayer)

    ############ DEFINE OPTIMIZER ############
    if optimizer == 'ADAM':
        trainOperation = AdamOptimizer(lossFunction,
                                       get('TRAIN.LSTM_BASELINE.%s' %
                                           learningRateName),
                                       clipGrads=True)
    elif optimizer == 'GRAD_DECAY':
        trainOperation = ScheduledGradOptimizer(
            lossFunction,
            baseLearningRate=get('TRAIN.LSTM_BASELINE.%s' % learningRateName))
    else:
        raise ValueError('Unrecognized optimizer: %s' % optimizer)

    ############ DEFINE LEARNING PARAMETERS ############
    stepCount = get('TRAIN.LSTM_BASELINE.%s' % stepCountName)
    batchSize = get('TRAIN.LSTM_BASELINE.%s' % batchSizeName)

    return predictionLayer, lossFunction, trainOperation, stepCount, batchSize
Example #8
	def getValidation(self, numbers, type):
		# map each validation type to its image-directory config key
		directoryKeys = {
			"images": 'DATA.ImageDirectoryTrain',
			"masks": 'DATA.ImageDirectoryTrainMask',
			"normals": 'DATA.ImageDirectoryTrainOutput',
		}
		im_array = np.zeros(0)
		first = 1
		if type in directoryKeys:
			for num in numbers:
				strFilename = get(directoryKeys[type]) + str(int(num)) + ".png"
				encodedImage = scipy.misc.imread(strFilename)
				if first == 1:
					first = 0
					im_array = encodedImage[np.newaxis, ...]
				else:
					im_array = np.concatenate((im_array, encodedImage[np.newaxis, ...]), axis=0)
		return im_array
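
Growing im_array with np.concatenate on every iteration copies the whole array each time; a sketch of the usual linear-time pattern ("filenames" here is a hypothetical list of paths):

frames = [scipy.misc.imread(f)[np.newaxis, ...] for f in filenames]
im_array = np.concatenate(frames, axis=0) if frames else np.zeros(0)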
Example #9
def getWinds():
    """
    Downloads the page where the list of mathematicians is found
    and returns a list of strings, one per mathematician
    """
    for source in [
            consts.SOURCEREAD.PRIGAL, consts.SOURCEREAD.EILAT_METEO_TECH,
            consts.SOURCEREAD.DOR_NACHSHOLIM, consts.SOURCEREAD.SURFO
    ]:
        url = config.get(source, "readsURL") + "?t=" + str(
            datetime.datetime.now().strftime("%d/%m/%Y-%H:%M:%S"))
        windUnits = config.get(source, "windUnits")
        response = simple_get(url)
        if response is not None:
            try:
                html = BeautifulSoup(response, 'html.parser')
                info = windInfo(source, url, windUnits)
                scrapWebData(html, info)
                jsonInfo = info.toJSON()
                firedata.writeWindReads(jsonInfo)
                res = firedata.readWindReads(source)
                wind_tracker.sense_for_wind_change(res)
            except Exception as e:
                log_error('Exception during response handling: {0}'.format(
                    str(e)))
        else:
            # Raise an exception if we failed to get any data from the url
            # raise Exception('Error retrieving contents at {}'.format(url))
            log_error('Error retrieving contents at {}'.format(url))
Example #10
def restoreAttention():
    additionalArgs = [{
        'flag': '--strideSize',
        'help': 'The stride to chunk MRI images into. Typical values are 10, 15, 20, 30, 40, 60.',
        'action': 'store',
        'type': int,
        'dest': 'strideSize',
        'required': True
    }, {
        'flag': '--type',
        'help': 'One of: traditional, reverse',
        'action': 'store',
        'type': str,
        'dest': 'type',
        'required': True
    }, {
        'flag': '--attention',
        'help': 'One of: 0, 1, 2, 3',
        'action': 'store',
        'type': int,
        'dest': 'attention',
        'required': True
    }]
    ParseArgs('Run 3D CNN over structural MRI volumes',
              additionalArgs=additionalArgs,
              useDefaults=False)
    if GlobalOpts.strideSize <= 0:
        GlobalOpts.strideSize = None
    GlobalOpts.imageBaseString = get('DATA.STRUCTURAL.DOWNSAMPLE_PATH')
    GlobalOpts.trainFiles = np.load(get('DATA.TRAIN_LIST')).tolist()
    GlobalOpts.imageBatchDims = (-1, 61, 73, 61, 1)
    GlobalOpts.trainBatchSize = 4
    GlobalOpts.augment = 'none'
    GlobalOpts.name = 'attention{}_{}_stride{}'.format(GlobalOpts.attention,
                                                       GlobalOpts.type,
                                                       GlobalOpts.strideSize)
    modelTrainer = ModelTrainer()
    GlobalOpts.checkpointDir = '{}{}/'.format('../checkpoints/attention_comp/',
                                              GlobalOpts.name)
    imagesPL, labelsPL = StructuralPlaceholders(GlobalOpts.imageBatchDims)
    trainingPL = TrainingPlaceholder()

    if GlobalOpts.type == 'traditional':
        GlobalOpts.convLayers = [8, 16, 32, 64]
    elif GlobalOpts.type == 'reverse':
        GlobalOpts.convLayers = [64, 32, 16, 8]

    GlobalOpts.randomAttentionStarter = False
    if GlobalOpts.attention == 0:
        GlobalOpts.attentionMapBools = [True, False, False, False]
    elif GlobalOpts.attention == 1:
        GlobalOpts.attentionMapBools = [False, True, False, False]
    elif GlobalOpts.attention == 2:
        GlobalOpts.attentionMapBools = [True, True, False, False]
    elif GlobalOpts.attention == 3:
        GlobalOpts.attentionMapBools = [False, False, False, False]
        GlobalOpts.randomAttentionStarter = True
    saveFilters(GlobalOpts, imagesPL, trainingPL)
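
ParseArgs itself is project code not shown on this page; a sketch of how these additionalArgs dicts presumably map onto argparse (the 'flag' key becomes the option string, the remaining keys are add_argument keyword arguments):

import argparse

def parse_args_sketch(description, additionalArgs=()):
    # hypothetical reimplementation, for illustration only
    parser = argparse.ArgumentParser(description=description)
    for arg in additionalArgs:
        arg = dict(arg)  # copy so the caller's dict is untouched
        parser.add_argument(arg.pop('flag'), **arg)
    return parser.parse_args()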
Example #11
def train_cnn(input_layer, prediction_layer, loss_func, optimizer, faces):
	''' Train CNN '''
	try:
		for batch_index in range(get('TRAIN.CNN.NB_STEPS')):
			report_training_progress(batch_index, input_layer, loss_func, faces)
			batch_images, batch_masks, batch_normals = faces.train.next_batch(get('TRAIN.CNN.BATCH_SIZE'))
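			# masks and true_normals in the feed_dict below are presumably
			# module-level placeholders in the source project (not shown here)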
			optimizer.run(feed_dict={input_layer: batch_images, masks: batch_masks, true_normals: batch_normals})
	
	except KeyboardInterrupt:
		print('OK, I will stop training even though I am not finished.')
Example #12
def update_benches():
    """
		update the both frappe-bench and release-bench
	"""
    from utils import config

    update_bench = config.get("update_bench")
    release_bench = config.get("release_bench")

    for bench in [update_bench, release_bench]:
        exec_cmd(bench, ['bench update --no-backup --reset'])
Example #13
def MatrixPlaceholders():
    """
    Returns input and output placeholders for the connectivity matrices in the data file.
    """
    matricesPL = tf.placeholder(dtype=tf.float32,
                                shape=(None, get('DATA.MATRICES.DIMENSION'),
                                       get('DATA.MATRICES.DIMENSION'), 1),
                                name='matricesPL')
    labelsPL = tf.placeholder(dtype=tf.float32,
                              shape=(None, 1),
                              name='labelsPL')
    return (matricesPL, labelsPL)
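
A sketch of feeding these placeholders during training (TF 1.x session API, matching the snippets on this page; batchX and batchY are hypothetical numpy batches, and the stand-in "model" below exists only to make the sketch runnable):

import tensorflow as tf

matricesPL, labelsPL = MatrixPlaceholders()
# trivial stand-in model: mean over each matrix (real models, cf. Example #2,
# replace this line)
predictions = tf.reduce_mean(matricesPL, axis=[1, 2])
lossFunction = tf.losses.mean_squared_error(labels=labelsPL,
                                            predictions=predictions)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss = sess.run(lossFunction,
                    feed_dict={matricesPL: batchX, labelsPL: batchY})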
Example #14
    def readDateTime(self, Value):
        readDateTime = self.getDateTime(Value)
        if readDateTime is not None:
            self._infoDateTime = readDateTime
            dFormat = config.get(self.infoSourceName, "dateFormat") or '%d/%m/%y'
            tFormat = config.get(self.infoSourceName, "timeFormat") or '%H:%M'
            self.infoDate = readDateTime.strftime(dFormat)
            self.infoTime = readDateTime.strftime(tFormat)
Example #15
def SlicePlaceholders():
    """
    Returns input and output placeholders for the slice images in the /data directory.
    """
    slicesPL = tf.placeholder(dtype=tf.float32,
                              shape=(None, get('DATA.SLICES.DIMENSION'),
                                     get('DATA.SLICES.DIMENSION'), 1),
                              name='slicesPL')
    labelsPL = tf.placeholder(dtype=tf.float32,
                              shape=(None, 1),
                              name='labelsPL')
    return (slicesPL, labelsPL)
Example #16
    def __init__(self):
        set_section("db")
        self.__username = get('username')
        self.__password = get('passwd')
        self.__db = get('dbname')
        self.__port = int(get('port'))
        self.__host = get('host')
        self.__charset = 'utf8'
        self.conn = None
        self.cursor = None
        self.times = 0
        self.connect_db()
Example #17
def getDateTime(val):
    ''' Parse a reading's timestamp string (e.g. "01/02 09:20") using the
        source's configured date and time formats. When the format carries
        no year, strptime defaults to 1900, so the current year is
        substituted. '''
    dateTimeFormat = config.get(
        consts.SOURCEREAD.EILAT_METEO_TECH, "dateFormat") + " " + config.get(
            consts.SOURCEREAD.EILAT_METEO_TECH, "timeFormat")
    readDateTime = datetime.datetime.strptime(val.strip(), dateTimeFormat)
    if readDateTime.year == 1900:
        readDateTime = readDateTime.replace(year=datetime.datetime.now().year)
    return readDateTime
Example #18
def TimecoursePlaceholders():
    """
    Returns input and output placeholders for the timecourses in the /data directory.
    """
    timecoursePL = tf.placeholder(tf.float32,
                                  shape=(None,
                                         get('DATA.TIMECOURSES.SEQ_LENGTH'),
                                         get('DATA.TIMECOURSES.SEQ_WIDTH')),
                                  name='timecoursePL')
    labelsPL = tf.placeholder(dtype=tf.float32,
                              shape=(None, 1),
                              name='labelsPL')
    return (timecoursePL, labelsPL)
Example #19
def build_docs(apps, _pull=False):
    """ build frappe app's documentation """
    from utils import config

    release_bench = config.get("release_bench")
    release_bench_site = config.get("release_bench_site")
    commit_msg = config.get("docs_commit_msg")
    target_app_mapper = config.get("docs_target_apps")
    base_branches = config.get("base_branch_mapper") or {}

    branch = "docs-{0}".format(now_date(format='%Y.%m.%d'))
    for app in apps:
        try:
            apps_path = os.path.join(release_bench, 'apps', app)
            if not os.path.exists(apps_path):
                print "app is not installed"
                continue

            target = target_app_mapper.get(app, {})
            target_app = target.get("app", None)
            owner = target.get("owner", None)
            if not target_app or not owner:
                print "target app mapping not available"
                continue

            target_app_path = os.path.join(release_bench, 'apps', target_app)
            if not os.path.exists(target_app_path):
                print "app is not installed"
                continue

            if _pull:
                pull(apps_path, "upstream", base_branches.get(app, "develop"))
                pull(target_app_path, "upstream",
                     base_branches.get(target_app, "master"))

            checkout(target_app_path, branch, create_new=True)

            # build docs
            exec_cmd(release_bench,
                     ['bench --site {0} build-docs --target {1} {2}'.format(
                         release_bench_site, target_app, app)])

            push(app, target_app_path, branch, commit_msg, commit=True)
            pull_request(target_app,
                         commit_msg,
                         branch,
                         base=base_branches.get(target_app, "master"),
                         owner=owner)
            checkout(target_app_path,
                     base_branches.get(target_app, "master"),
                     delete_branch_after_checkout=False,
                     delete_branch=branch)
        except Exception as e:
            print e
Example #20
    def save_config(self):
        adsrconf = config.get('adsr')
        for param in adsr_params.keys():
            adsrconf[param] = getattr(self.adsrList, param)

        sounds = config.get('sounds')
        sounds['files'] = self.sounds[:]

        globs = config.get('globals')
        globs['invert_control'] = self.invert_control
        globs['trigger_shadow'] = self.trigger_shadow
        globs['default_volume'] = self.sldVolumen.value()
        globs['sensor_count'] = len(self.players)

        config.save()
Example #21
def train_autoencoder(input_layer, loss_func, optimizer, faces):
    ''' Train autoencoder. '''
    # TODO: ensure `config.json` specifies a number of training steps, learning
    #       rate, and batch size in accordance with the project specifications.
    #       You will not learn theory from this exercise, but you will glimpse
    #       how, sometimes, programming is plumbing: connecting up the pipes
    #       until the system works.
    try:
        for batch_index in range(get('TRAIN.NB_STEPS')):
            report_training_progress(
                batch_index, input_layer, loss_func, faces)
            batch = faces.train.next_batch(get('TRAIN.BATCH_SIZE'))
            optimizer.run(feed_dict={input_layer: batch[0]})
    except KeyboardInterrupt:
        print('OK, I will stop training even though I am not finished.')
Example #22
def compareDownsampling():
    additionalArgs = [{
        'flag': '--scale',
        'help': 'The scale at which to slice dimensions. For example, a scale of 2 means that each dimension will be divided into 2 distinct regions, for a total of 8 contiguous chunks.',
        'action': 'store',
        'type': int,
        'dest': 'scale',
        'required': True
    }, {
        'flag': '--type',
        'help': 'One of: depth, reverse, constant.',
        'action': 'store',
        'type': str,
        'dest': 'type',
        'required': True
    }, {
        'flag': '--downscaleRate',
        'help': 'One of: 1, 2, 3.',
        'action': 'store',
        'type': int,
        'dest': 'downscaleRate',
        'required': True
    }]
    ParseArgs('Run 3D CNN over structural MRI volumes', additionalArgs=additionalArgs)
    GlobalOpts.trainFiles = np.load(get('DATA.TRAIN_LIST')).tolist()
    GlobalOpts.valdFiles = np.load(get('DATA.VALD_LIST')).tolist()
    GlobalOpts.testFiles = np.load(get('DATA.TEST_LIST')).tolist()
    GlobalOpts.augment = 'none'
    GlobalOpts.name = '{}_scale{}_sample{}'.format(GlobalOpts.type, GlobalOpts.scale, GlobalOpts.downscaleRate)
    if GlobalOpts.downscaleRate == 1:
        GlobalOpts.imageBaseString = get('DATA.STRUCTURAL.NUMPY_PATH')
        GlobalOpts.imageBatchDims = (-1, 121, 145, 121, 1)
    elif GlobalOpts.downscaleRate == 2:
        GlobalOpts.imageBaseString = get('DATA.STRUCTURAL.DOWNSAMPLE_PATH')
        GlobalOpts.imageBatchDims = (-1, 61, 73, 61, 1)
    elif GlobalOpts.downscaleRate == 3:
        GlobalOpts.imageBaseString = get('DATA.STRUCTURAL.EXTRA_SMALL_PATH')
        GlobalOpts.imageBatchDims = (-1, 41, 49, 41, 1)
    
    GlobalOpts.trainBatchSize = 4
    if GlobalOpts.type == 'depth':
        GlobalOpts.cnn = depthPatchCNN
    elif GlobalOpts.type == 'reverse':
        GlobalOpts.cnn = reverseDepthCNN
    elif GlobalOpts.type == 'constant':
        GlobalOpts.cnn = constantDepthCNN

    modelTrainer = ModelTrainer()

    GlobalOpts.summaryDir = '{}{}/'.format(get('TRAIN.CNN_BASELINE.SUMMARIES_DIR'),
                                           GlobalOpts.name)
    GlobalOpts.checkpointDir = '{}{}/'.format(get('TRAIN.CNN_BASELINE.CHECKPOINT_DIR'),
                                              GlobalOpts.name)
    RunTestOnDirs(modelTrainer)
Example #23
    def sign_in(self, onetime_code=None):
        """ Load token. If not available the user must provide a
            one time code from https://my.remarkable.com/connect/remarkable
        """

        try:
            # Get device token if not stored local
            device_token = cfg.get("authentication.device_token")
            if device_token is None:
                if onetime_code is None or onetime_code == "":
                    self.listener_handler.publish(EVENT_ONETIMECODE_NEEDED)
                    return

                device_token = self._get_device_token(onetime_code)
                if device_token is None:
                    self.listener_handler.publish(EVENT_DEVICE_TOKEN_FAILED)
                    return

            # Renew the user token.
            user_token = self._get_user_token(device_token)
            if user_token is None:
                self.listener_handler.publish(EVENT_USER_TOKEN_FAILED)
                return

            # Save tokens to config
            auth = {"device_token": device_token, "user_token": user_token}
            cfg.save({"authentication": auth})

            # Inform all subscriber
            self.listener_handler.publish(EVENT_SUCCESS, auth)
        except Exception:
            auth = {}
            self.listener_handler.publish(EVENT_FAILED, auth)

        return auth
Example #24
def SliceCNN(imagesPL,
             trainingPL,
             keepProbability=get('TRAIN.CNN_BASELINE.KEEP_PROB'),
             defaultActivation=tf.nn.elu,
             optionalHiddenLayerUnits=0,
             downscaleRate=None):
    with tf.variable_scope('ConvolutionalNetwork'):
        if imagesPL.dtype != tf.float32:
            imagesPL = tf.cast(imagesPL, tf.float32, name='CastInputToFloat32')

        if downscaleRate:
            if isinstance(downscaleRate, int):
                downscaleSize = [1, downscaleRate, downscaleRate, 1]
                imagesPL = pool2D(imagesPL, kernel_size=downscaleSize, strides=downscaleSize)
            elif isinstance(downscaleRate, (list, tuple)) and len(downscaleRate) == 2:
                downscaleSize = [1, downscaleRate[0], downscaleRate[1], 1]
                imagesPL = pool2D(imagesPL, kernel_size=downscaleSize, strides=downscaleSize)
            else:
                raise ValueError('Unrecognized downscale rate: {}'.format(downscaleRate))

        ################## FIRST BLOCK ##################
        Block1 = block2D(imagesPL, trainingPL, blockNumber=1, filters=8)

        ################## SECOND BLOCK ##################
        Block2 = block2D(Block1, trainingPL, blockNumber=2, filters=16)

        ################## THIRD BLOCK ##################
        Block3 = block2D(Block2, trainingPL, blockNumber=3, filters=32)

        with tf.variable_scope('FullyConnectedLayers'):
            flattenedLayer = tf.layers.flatten(Block3)
            if optionalHiddenLayerUnits > 0:
                optionalHiddenLayer = standardDense(inputs=flattenedLayer, units=optionalHiddenLayerUnits, activation=defaultActivation, name='optionalHiddenLayer')
                droppedOutHiddenLayer = tf.contrib.layers.dropout(inputs=optionalHiddenLayer, keep_prob=keepProbability, is_training=trainingPL)
                flattenedLayer = droppedOutHiddenLayer

            numberOfUnitsInOutputLayer = 1
            outputLayer = standardDense(flattenedLayer, units=numberOfUnitsInOutputLayer, activation=None, use_bias=False, name='outputLayer')
    return outputLayer
Example #25
def get_url(section, image):
    # Build the URL for the given image command, as read from the config file
    url = None
    if section and image:
        url = config.get(section, image) + "?t=" + datetime.datetime.now().strftime("%d/%m/%Y-%H:%M:%S")

    return url
Example #26
    async def connect(self, hostname, port):
        try:
            hostname = hostname.encode('utf-8')
        except AttributeError:
            pass

        # config
        server = config.get('default', 'server')
        server_port = config.getint('default', 'server_port')

        loop = asyncio.get_event_loop()
        # establish the connection via ProxyClient
        try:
            transport, client = await loop.create_connection(
                ProxyClient, server, server_port)
        # connection failed
        except Exception:
            logging.error('Could not connect to server: {}:{}'.format(
                server, server_port))
            if self.transport.can_write_eof():
                self.transport.write_eof()
            return False

        # bind server_transport and transport
        client.server_transport = self.transport
        self.client_transport = transport
        # send the address info: hostname and port
        self.client_transport.write(
            pack('!i%ssH' % len(hostname), len(hostname), hostname, port))
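
The frame written above is a 4-byte big-endian length, the hostname bytes, and a 2-byte port. A sketch of the matching decode on the receiving side (the function name and framing assumptions are mine, not from the source project):

from struct import unpack

def parse_addr_frame(data):
    # mirrors pack('!i%ssH' % len(hostname), len(hostname), hostname, port)
    (name_len,) = unpack('!i', data[:4])
    hostname, port = unpack('!%dsH' % name_len, data[4:4 + name_len + 2])
    return hostname.decode('utf-8'), port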
Example #27
    def sync(self):
        if self.state == model.item.STATE_SYNCING:
            return

        self.state = model.item.STATE_SYNCING
        self._update_state_listener()

        self._download_raw()
        self._write_remapy_file()
        self._update_state(inform_listener=False)

        annotations_exist = os.path.exists(self.path_rm_files)

        if self.type == TYPE_NOTEBOOK and annotations_exist:
            render.notebook(self.path,
                            self.id(),
                            self.path_annotated_pdf,
                            self.is_landscape(),
                            path_templates=cfg.get("general.templates"))

        else:
            if annotations_exist:
                # Also for epubs a pdf file exists which we can annotate :)
                # We will then show the pdf rather than the epub...
                render.pdf(self.path_rm_files, self.path_original_pdf,
                           self.path_annotated_pdf, self.path_oap_pdf)

        self._update_state()
        self.parent().sync()
Example #28
def test2(dataSet):
    print('----------------------------------------------------------------')
    print('----------------------------TEST 2------------------------------')
    print('----------------------------------------------------------------')
    X = np.array(
        [mat[np.tril_indices(mat.shape[0], k=-1)] for mat in dataSet.matrices])
    Y = np.array(dataSet._df['AgeYears'].values.copy())
    Y = np.reshape(Y, [Y.shape[0], 1])

    imagesPL, predictionLayer = cnnNeural()
    labelsPL = tf.placeholder(tf.float32, shape=[None, 1])
    lossFunction = tf.losses.mean_squared_error(labels=labelsPL,
                                                predictions=predictionLayer)
    global_step = tf.Variable(0, name='global_step', trainable=False)
    trainOperation = tf.train.AdamOptimizer(
        get('TRAIN.CNN.LEARNING_RATE')).minimize(lossFunction,
                                                 global_step=global_step)

    RepeatModelXY(X,
                  Y,
                  imagesPL,
                  labelsPL,
                  predictionLayer,
                  trainOperation,
                  lossFunction,
                  numRepeats=10)
Example #29
def get_target_coverage():
    global target_coverage
    if target_coverage is None:
        target_coverage = config.get()
        if target_coverage is None:
            raise ValueError(
                "stats.get_target_coverage(): target_coverage not set")
    return target_coverage
Example #30
def create_admin_user():
    admin = User("administrator",
                 "changeme",
                 "The",
                 "Administrator",
                 config.get('admin', 'admin_email'),
                 validated=True)
    session.add(admin)
    session.flush()

    admin_role = UserRoles(admin, "Administrator", False)
    session.add(admin_role)
    session.flush()

    for role in UserRoles.roles:
        if role != "Administrator":
            new_role = UserRoles(admin, role, True)
            session.add(new_role)
            session.flush()

    admin.add_change(admin)
    session.flush()

    session.commit()
    return admin
Example #31
    def _extract_features(self, gallery_images, config):
        # extract gallery features
        if config["dist_type"] == "hamming":
            gallery_features = np.zeros(
                [len(gallery_images), config['embedding_size'] // 8],
                dtype=np.uint8)
        else:
            gallery_features = np.zeros(
                [len(gallery_images), config['embedding_size']],
                dtype=np.float32)

        # construct batch images and run inference
        batch_size = config.get("batch_size", 32)
        batch_img = []
        for i, image_file in enumerate(tqdm(gallery_images)):
            img = cv2.imread(image_file)
            if img is None:
                logger.error("img empty, please check {}".format(image_file))
                exit()
            img = img[:, :, ::-1]
            batch_img.append(img)

            if (i + 1) % batch_size == 0:
                rec_feat = self.rec_predictor.predict(batch_img)
                gallery_features[i - batch_size + 1:i + 1, :] = rec_feat
                batch_img = []

        if len(batch_img) > 0:
            rec_feat = self.rec_predictor.predict(batch_img)
            gallery_features[-len(batch_img):, :] = rec_feat
            batch_img = []

        return gallery_features
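
When dist_type is "hamming", each feature above is bit-packed into embedding_size // 8 uint8 values. A sketch (an assumption, not PaddleClas code) of comparing a packed query feature against the gallery with Hamming distance via XOR and popcount:

import numpy as np

def hamming_distances(query, gallery):
    # query: (embedding_size // 8,) uint8; gallery: (N, embedding_size // 8) uint8
    xor = np.bitwise_xor(gallery, query)
    return np.unpackbits(xor, axis=1).sum(axis=1)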
Example #32
def get_weights(saver, sess):
    ''' load model weights if they were saved previously '''
    if is_file_prefix('TRAIN.CNN.CHECKPOINT'):
        saver.restore(sess, get('TRAIN.CNN.CHECKPOINT'))
        print('Yay! I restored weights from a saved model!')
    else:
        print('OK, I did not find a saved model, so I will start training from scratch!')
Example #33
    def setUpClass(cls):
        cls.logger = logging.getLogger(cls.__name__)
        cls.logger.setLevel(getattr(logging, config.get("log_level")))
        stream_handler = logging.StreamHandler(sys.stdout)
        cls.logger.addHandler(stream_handler)

        cls.verify = verify.VerifyFuncs(cls.logger)
        cls.publisher = publisher.PublishFuncs(cls.logger)
Example #34
    def _change_ip(self):
        change_type = config.get('main', 'IP_CHANGE')
        if change_type == 'neo':
            zmiana()
        elif change_type == 'play':
            ipchange()
        else:
            raise UnrecognizedIpchangeType("Invalid IP_CHANGE value in configuration file.")
Example #35
def on_orders_queue_declared(frame):
    """Called when a queue has been declared on the `orders` exchange."""
    channel = config.get("channel")
    callbacks = dict(validated=process)
    queue_name = frame.method.queue
    assert queue_name in callbacks, "Unknown queue: %s" % queue_name

    channel.basic_consume(callbacks[queue_name], queue=queue_name)
    channel.queue_bind(exchange="orders", queue=queue_name)
Example #36
def handle_ctl_msg(channel, method, header, body):
    """Called when we receive a control message from the shell."""
    body = body.strip()
    print "* Rcvd ctrl msg: %s" % body
    channel.basic_ack(delivery_tag=method.delivery_tag)

    connection = config.get("connection")
    result_queue = config.get("result_queue")

    if body == "quit":
        channel.basic_publish(exchange="rpc", routing_key="jobs", body=body,
            properties=pika.BasicProperties(delivery_mode=1))
        connection.close()
        connection.ioloop.start()
    else:
        channel.basic_publish(exchange="rpc", routing_key="jobs", body=body,
            properties=pika.BasicProperties(
                delivery_mode=1, reply_to=result_queue))
Example #37
def on_incoming_queue_declared(frame):
    """Called when a queue has been declared on the `incoming` exchange."""
    channel = config.get("channel")
    callbacks = dict(incoming=decrypt, decrypted=authenticate)
    queue_name = frame.method.queue
    assert queue_name in callbacks, "Unknown queue: %s" % queue_name

    channel.basic_consume(callbacks[queue_name], queue=queue_name)
    channel.queue_bind(exchange="incoming", queue=queue_name,
                       routing_key="rk-%s" % queue_name)
Example #38
    def test_9_parallel_publication(self):
        dsets = datasets.get_parallel_test_datasets()
        pool_step = int(config.get('partest_pool_size_increment'))
        pool_max = int(config.get('partest_pool_size_max'))

        try:
            self.parallel_log_threshold = getattr(logging, config.get('partest_log_level'))
        except KeyError:
            pass

        pool_sizes = range(pool_step, 1 + pool_max, pool_step)

        self.ensure_empty(dset_list = dsets)
        try:
            self.run_parallel_tests(dsets, pool_sizes)
        finally:
            # tearDownClass only removes the basic test data, not all the
            # datasets used in the parallel test, so do it here instead.
            self.ensure_empty(dset_list = dsets)
Example #39
def on_result_queue_declared(frame):
    """
    Called when the RPC results queue has been declared, the generated
    name is in the frame (response from RabbitMQ).
    """
    result_queue = frame.method.queue
    config.set("result_queue", result_queue)
    channel = config.get("channel")
    channel.basic_consume(handle_result, queue=result_queue)
    channel.queue_bind(exchange="rpc", queue=result_queue,
                       routing_key=result_queue)
Example #40
def send_email(to, subject, html, sender=None):
    url = config.get('sendgrid', 'send_url')
    params = {
        'api_user': env.get('SENDGRID_USER'),
        'api_key': env.get('SENDGRID_KEY'),
        'to': to,
        'subject': subject,
        'html': html,
        'from': sender or '*****@*****.**'
    }
    rep = requests.get(url, params=params)
    rep.raise_for_status()
Example #41
def pull_and_save_tweets(api):
    cfg = config.get("db")
    uri = "%s:%s@%s/%s" % (
        cfg.get("user"), cfg.get("password"), cfg.get("host"), cfg.get("db"))
    conn = pymongo.Connection(uri)
    db = conn[cfg.get("db")]
    messages = db[cfg.get("messages_coll")]
    messages.ensure_index("id")
    maxids = db[cfg.get("maxids_coll")]

    cfg = config.get("twitter")
    since_id = get_since_id(maxids, cfg.get("user_name"))
    log.msg(pprint.pformat(since_id))

    tweets = []
    try:
        if since_id["id"]:
            tweets = api.getHomeTimeline(since_id=since_id["id"], count=199)
        else:
            tweets = api.getHomeTimeline(count=199)
    except Exception, e:
        log.err(e)
Example #42
def send_email(to_addrs, subject, msg, from_addr, cc=None, bcc=None):
    """ Send an email """
    msg['Subject'] = Header(subject, 'utf-8')
    msg['From'] = from_addr

    assert isinstance(to_addrs, list)
    msg['To'] = ', '.join(to_addrs)
    
    if cc:
        msg['CC'] = ', '.join(cc)
        to_addrs.extend(cc)
    
    if bcc:
        to_addrs.extend(bcc)
        
    smtp = smtplib.SMTP()
    smtp.connect(config.get('email', 'email_host'))
    smtp.sendmail(from_addr, to_addrs, msg.as_string())
    smtp.quit()
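
The msg argument above is expected to be an email.mime message object (note the msg.as_string() call). A hypothetical usage, with placeholder addresses:

from email.mime.text import MIMEText

msg = MIMEText('Hello', 'plain', 'utf-8')
send_email(['to@example.com'], 'Greetings', msg, 'from@example.com')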
Example #43
def create_admin_user():
    admin = User("administrator", "changeme", "The", "Administrator", config.get('admin', 'admin_email'),
                 validated=True)
    session.add(admin)
    session.flush()

    admin_role = UserRoles(admin, "Administrator", False)
    session.add(admin_role)
    session.flush()

    for role in UserRoles.roles:
        if role != "Administrator":
            new_role = UserRoles(admin, role, True)
            session.add(new_role)
            session.flush()

    admin.add_change(admin)
    session.flush()

    session.commit()
    return admin
Example #44
    async def connect(self, hostname, port):
        # config
        server = config.get('default', 'server')
        server_port = config.getint('default', 'server_port')

        loop = asyncio.get_event_loop()
        # establish the connection via ProxyClient
        try:
            transport, client = await loop.create_connection(ProxyClient,
                                                             server,
                                                             server_port)
        # connection failed
        except Exception:
            logging.error('Could not connect to server: {}:{}'.format(server, server_port))
            if self.transport.can_write_eof():
                self.transport.write_eof()
            return False

        # bind server_transport and transport
        client.server_transport = self.transport
        self.client_transport = transport
        # send the address info: hostname and port
        self.client_transport.write(
            pack('!i%ssH' % len(hostname), len(hostname), hostname, port))
Example #45
import calendar

try:
    from utils import log, config
except Exception, e:
    sys.path.append('../')
    from utils import log, config

try:
    from mongo import mongo_commons
except Exception, e:
    sys.path.append('../')
    from mongo import mongo_commons

config = config.Config('crowddata')
client = pymongo.MongoClient(config.get('database').get('connection'))
database = config.get('database').get('db')
document_data = config.get('database').get('document').get('data')
document_date = config.get('database').get('document').get('date')

def insertData(json):
    # insert (unique) date
    date = [int(s) for s in json['date'].split(',')]
    json["date"] = datetime.datetime(int(date[0]), int(date[1]), int(date[2]))

    date = [int(s) for s in json['fulldate'].split(',')]
    json["fulldate"] = datetime.datetime(int(date[0]), int(date[1]), int(date[2]), int(date[3]), int(date[4]), int(date[5]))

    # insert data json in data
    mongo_commons.insert(client, database, document_data, json)
    cursor = client[database][document_date].find({ "date" : json["date"]}).count()
Example #46
    def __init__(self):
        self._username = config.get('captcha', 'username')
        self._password = config.get('captcha', 'password')
Example #47
def main():
	server = GameServer(config.get('port'), GameClient)
	server.serve_forever()
Example #48
    collection = "layer"
    print sortdate
    result = db.find_by_code(collection, code, int(sortdate))
    return Response(result, content_type='application/json; charset=utf-8')


@app.route('/geometadata/query/<collection>', methods=['PUT'])
@cross_origin(origins='*')
def query_post(collection):
    # print collection
    # query = json.dumps(request.form['payload'])
    payload = json.loads(request.json['payload'])
    print payload
    result = db.find_query(collection, payload)
    print result
    return Response(result, content_type='application/json; charset=utf-8')


@app.route('/geometadata/delete/<layername>', methods=['DELETE'])
@cross_origin()
def delete_layer( layername):
    return "TODO :Remove layername from layer and stats collections"


if __name__ == '__main__':
    l.info(config.get('ip') + ':' + str(config.get('port')))
    app.run(host=config.get('ip'), port=config.get('port'), debug=config.get('debug'))



Example #49
            if self.transport.can_write_eof():
                self.transport.write_eof()
            return False

        # bind server_transport and transport
        client.server_transport = self.transport
        self.client_transport = transport
        # send the address info: hostname and port
        self.client_transport.write(
            pack('!i%ssH' % len(hostname), len(hostname), hostname, port))


if __name__ == '__main__':
    # config
    debug = config.getboolean('default', 'debug')
    local = config.get('default', 'local')
    local_port = config.getint('default', 'local_port')

    if debug:
        debug_level = logging.DEBUG
    else:
        debug_level = logging.ERROR

    # log
    logging.basicConfig(level=debug_level,
                        format='%(threadName)10s %(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
    logging.getLogger('asyncio').setLevel(debug_level)

    loop = asyncio.get_event_loop()
    if debug:
Example #50
        client.server_transport = self.transport
        self.client_transport = transport
        client.hostname = hostname

        # reply to the browser
        hostip, port = transport.get_extra_info('sockname')
        host = unpack("!I", socket.inet_aton(hostip))[0]
        self.transport.write(
            pack('!BBBBIH', 0x05, 0x00, 0x00, 0x01, host, port))


if __name__ == '__main__':
    # config
    debug = config.getboolean('default', 'debug')
    server = config.get('default', 'server')
    server_port = config.getint('default', 'server_port')

    if debug:
        debug_level = logging.DEBUG
    else:
        debug_level = logging.ERROR

    # log
    logging.basicConfig(level=debug_level,
                        format='%(threadName)10s %(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', filemode='a+')
    logging.getLogger('asyncio').setLevel(debug_level)

    loop = asyncio.get_event_loop()
    if debug:
Example #51
    try:
        if since_id["id"]:
            tweets = api.getHomeTimeline(since_id=since_id["id"], count=199)
        else:
            tweets = api.getHomeTimeline(count=199)
    except Exception, e:
        log.err(e)

    log.msg(len(tweets))
    if tweets:
        for tweet in tweets:
            if not isinstance(tweet, dict):
                continue
            try:
                tweet["text"] = urls.sanitize_urls(tweet["text"])
            except Exception:
                pass # not much we can do here
        messages.insert(tweets)
        ids = [int(tweet["id"]) for tweet in tweets if isinstance(tweet, dict)]
        if ids:
            since_id["id"] = max(ids)
            maxids.save(since_id)


l = task.LoopingCall(pull_and_save_tweets, api)
cfg = config.get("twitter")
l.start(float(cfg.get("interval", 600)))  # call every "interval" seconds (default 600 = ten minutes)

# l.stop() will stop the looping calls
reactor.run()
Example #52
try:
    from utils import log, config
except Exception, e:
    sys.path.append('../')
    from utils import log, config

try:
    from mongo import mongo_commons
except Exception, e:
    sys.path.append('../')
    from mongo import mongo_commons


config = config.Config('geometadata')
client = pymongo.MongoClient(config.get('database').get('connection'))
database = config.get('database').get('db')
document_layer = config.get('database').get('document').get('layer')
document_stats = config.get('database').get('document').get('stats')

"""
Insert Layer Metadata in mongodb
@param json: json data
@return: id
"""
def insertMetadata(json):
    return mongo_commons.insert(client, database, document_layer, json)

"""
Delete Layer Metadata in mongodb
@param json: json data
Example #53
import requests, shlex, sys, json
from utils import message_printer, get_location, gen_map, prettyprint_map, reports_printer, comments_printer
from utils import config

APIBASE = "http://s.jdiez.me:5000/json/"
APIBASE = config.get("api", "base")

print "euskalMap CLI consumer - v.0.1"
print

def help(arg):
	print "Available commands: "
	print
	print ', '.join(commands.keys()) # weird python syntax. lol

def request_error(code, response):
	print "! API returned code " + str(code) + " and data:"
	print response
	
def show(arg):
	def messages(arg):
		def print_data(url):
			data = requests.get(url)
			if data.status_code != 200:
				request_error(data.status_code, data.text)

			data = json.loads(data.text)
			message_printer(data)
	
		additional_arguments =	{
									'timestamp': 'has_timestamp',
Example #54
# make sure that rating is numeric (the intended range is 0-100, but only
# numeric validity is checked here)
try:
    rating = float(rating)
except (TypeError, ValueError):
    rating = None

if not comment:
    raise ValidationError('Must provide comment to add')

# get the image id
# we are going to use the json map to
# retrieve the path to the image
media_id = form.getvalue('mid')

# lookup the media path
lookup_path = config.get('map_path')
resource_map = json.load(open(lookup_path,'r'))

path = resource_map.get(media_id)
if not path:
    raise ValidationError('Media not found')

set_image_comment(path,body=comment,rating=rating)

# where do we point them back to? that medias page
page_url = get_media_page_url(media_id)

# lets update the pages associated w/ that id
page_writer = PageWriter(template_root=config.get('template_root'),
                         pages_root=config.get('pages_root'),
                         media_dir_template=config.get('media_dir_template'))
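
A sketch (not in the original) of a check that also enforces the 0-100 range the rating comment above mentions:

try:
    rating = float(rating)
    if not 0 <= rating <= 100:
        rating = None
except (TypeError, ValueError):
    rating = None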
Example #55
def on_job_queue_declared(frame):
    """Called when the RPC job queue has been declared."""
    channel = config.get("channel")
    channel.basic_consume(handle_job, queue="jobs")
    channel.queue_bind(exchange="rpc", queue="jobs", routing_key="jobs")
Example #56
import csv

try:
    from utils import log, config
except Exception, e:
    sys.path.append('../../')
    from utils import log, config

try:
    from postgresql.crowddata.crowddata import DBCrowddata
except Exception, e:
    sys.path.append('../../')
    from postgresql.crowddata.crowddata import DBCrowddata

config = config.Config('crowddata_postgresql')
database = config.get('database')
print database
db = DBCrowddata(database)
l = log.Logger()


class RandomData:

    def __init__(self):
        pass

    def date(self, start, end):
        delta = end - start
        int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
        random_second = randrange(int_delta)
        return start + timedelta(seconds=random_second)
Example #57
def on_ctl_queue_declared(_):
    """Called when the control queue has been declared."""
    channel = config.get("channel")
    channel.basic_consume(handle_ctl_msg, queue='rpc_ctl')
Example #58
import pymongo

try:
    from utils import log, config
except Exception, e:
    sys.path.append('../')
    from utils import log, config

try:
    from mongo import mongo_commons
except Exception, e:
    sys.path.append('../')
    from mongo import mongo_commons

config = config.Config('geostatistics')
client = pymongo.MongoClient(config.get('geostats_datastore').get('connection'))
database = config.get('geostats_datastore').get('db')

"""
Delete Layer Metadata in mongodb
@param json: json data
@return: id
"""
def removeMetadata(layer, json):
    return mongo_commons.remove(client, database, layer, json)


"""
Insert Layer Statistics in mongodb
@param json: json data
@return: id
Example #59
#!/usr/bin/env python

from utils import config
from utils.esg_config import default_config_path as esgini
from one_time_setup.simple_mapfile_gen import gen_all_mapfiles
from one_time_setup.make_dummy_ensemble import MakeDummyEnsemble

mapfile_dir = config.get('test_mapfile_dir')
data_root = config.get('test_data_dir')
host_certs_dir = config.get('host_certs_dir')

print "making ensemble for parallel test"
ensmaker = MakeDummyEnsemble(int(config.get('partest_ensemble_size')),
                             config.get('partest_ensemble_dir'),
                             config.get('partest_template_member'),
                             config.get('partest_member_pattern'))
ensmaker.make_ensemble()

print "making mapfiles"
gen_all_mapfiles(data_root, mapfile_dir)

print """
Mapfiles for test publication are set up.


You also need to:

(1) in %s:

  (a) ensure that the following directory is listed
      under thredds_dataset_roots: