Example #1
 def get_delta(self, scalper, lock):
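     # three-phase flow: seed init_calc with the current mid-price, schedule
     # next_calc, then once time_interval elapses record the mid-price delta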
     _log(f"scraping delta for : {scalper['name']}")
     self.createTable(scalper)
     client = self.initConnector(scalper)
     if scalper['init_calc'] == 0:
         bbo = client.getBBO()
         initial = (float(bbo['asks'][0][0]) +
                    float(bbo['bids'][0][0])) * 0.5
         self.scalper.find_one_and_update({'name': scalper['name']},
                                          {"$set": {
                                              'init_calc': initial
                                          }})
     elif int(time.time()) >= scalper['next_calc'] and scalper['next_calc'] != 0:
         bbo = client.getBBO()
         final = (float(bbo['asks'][0][0]) +
                  float(bbo['bids'][0][0])) * 0.5
         delta = final - scalper['init_calc']
         self.updateTable(delta=delta, key="delta", scalper=scalper)
         self.scalper.find_one_and_update({'name': scalper['name']}, {
             '$set': {
                 'next_calc':
                 int(int(time.time()) + scalper['time_interval']),
                 'init_calc': 0
             }
         })
     elif scalper['next_calc'] == 0:
         self.scalper.find_one_and_update({'name': scalper['name']}, {
             '$set': {
                 'next_calc':
                 int(int(time.time()) + scalper['time_interval'])
             }
         })
     lock.release()
     return
Example #2
    def _jump_to_situation(self):
        if self.jump_to.endswith(".csv"):
            self.jump_to = "MainSituation_%s" % self.jump_to.split(".csv")[0]

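        # look up the situation class by name in module globals and instantiate it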
        sit = globals()[self.jump_to](self)
        utils._log("JUMPING TO SITUATION: %s (%s)" % (self.jump_to, sit.__class__.__name__))
        return sit
Example #3
    def cost(self, input, targets, mask=None, v_part=None, v_true=None):

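        # objective: cross-entropy of the gold tokens plus alpha * log|Z(x)|^2,
        # a self-normalization penalty that pushes the partition function toward 1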
        if self.drop_rate > 0.:
            input = _dropout_from_layer(input, self.drop_rate)

        logit = self.apply_score(input, v_part)
        self.softmax(logit)

        golden_idx = targets.flatten()  # length must equal logit.shape[0]
        # targets is (y_maxlen, batch), so v_true must have the same shape
        if self.use_mv:
            _log(" building y flat idx with batch-level voc. ")
            golden_flat_idx = T.arange(
                golden_idx.shape[0]) * v_part.shape[0] + v_true.flatten()
        else:
            golden_flat_idx = T.arange(
                golden_idx.shape[0]) * self.lr_out + golden_idx

        gold_cost_flat = self.ce_p_y_give_x.flatten()[golden_flat_idx]
        log_norm_flat = self.log_norm.flatten()
        shape = [targets.shape[0], targets.shape[1]]
        if mask is not None:
            gold_cost_shape = gold_cost_flat.reshape(shape) * mask
            log_norm_shape = log_norm_flat.reshape(shape) * mask
        else:
            gold_cost_shape = gold_cost_flat.reshape(shape)
            log_norm_shape = log_norm_flat.reshape(shape)
        norm_cost_shape = gold_cost_shape + self.alpha * (log_norm_shape**2)
        # to observe how much we compressed log |Z(x)|
        return T.mean(norm_cost_shape), T.mean(T.abs_(log_norm_shape))
Example #4
    def multi_process(self, x_iter, n_process=5):
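        # fan out: each worker pulls (idx, line) jobs from queue and pushes
        # (idx, translation) results to rqueue; idx lets us restore input order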
        queue = Queue()
        rqueue = Queue()
        processes = [None] * n_process
        for pidx in xrange(n_process):
            processes[pidx] = Process(target=self.translate, args=(queue, rqueue, pidx))
            processes[pidx].start()

        def _send_jobs(x_iter):
            n_samples = 0
            for idx, line in enumerate(x_iter):
                queue.put((idx, line))
                n_samples += 1
            return n_samples

        def _finish_processes():
            for pidx in xrange(n_process):
                queue.put(None)

        def _retrieve_jobs(n_samples):
            trans = [None] * n_samples
            for idx in xrange(n_samples):
                resp = rqueue.get()
                trans[resp[0]] = resp[1]
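                # modulus 1 logs after every sample; raise it for sparser progress output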
                if numpy.mod(idx + 1, 1) == 0:
                    _log('Sample {}/{} Done'.format((idx + 1), n_samples))
            return trans

        _log('Translating ...')
        n_samples = _send_jobs(x_iter)     # sentence number in source file
        trans_res = _retrieve_jobs(n_samples)
        _finish_processes()
        _log('Done ...')

        return '\n'.join(trans_res)
Example #5
File: lightcurve.py Project: cdeil/enrico
    def VariabilityIndex(self):
        LcOutPath = self.LCfolder + self.config['target']['name']

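        # variability index: TSvar = 2*(sum(LogL1) - sum(LogL0)), comparing the
        # free-flux fit (LogL1) to the fit with flux frozen at the DC value (LogL0)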
        utils._log('Computing Variability index ')

        self.config['Spectrum']['FitsGeneration'] = 'no'
#        ValueDC = self.GetDCValue()
        ResultDicDC = utils.ReadResult(self.config)
        LogL1 = []
        LogL0 = []
        Time = []
        for i in xrange(self.Nbin):
            CurConfig = get_config(self.configfile[i])
            #Read the result. If it fails, the bin has not been computed; a warning is printed
            try :
                ResultDic = utils.ReadResult(CurConfig)
            except :
                print "WARNING : fail reading the config file : ",CurConfig
                print "Job Number : ",i
                print "Please have a look at this job log file"
                continue

#            LogL1.append(ResultDic.get("log_like"))
            #Update the time and time error array
            Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.)

            ##############################################################
            #   Compute the loglike value using the DC flux or prefactor
            ##############################################################
            # Create one obs instance
            CurConfig['Spectrum']['FitsGeneration'] = 'no'
            _,Fit = GenAnalysisObject(CurConfig,verbose=0)#be quiet
            Fit.ftol = float(self.config['fitting']['ftol'])

            #Spectral index management!
            utils.FreezeParams(Fit, self.srcname, 'Index', -self.config['LightCurve']['SpectralIndex'])
            LogL1.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer']))

            Model_type = Fit.model.srcs[self.srcname].spectrum().genericName()
            if (Model_type == 'PowerLaw') :
                utils.FreezeParams(Fit, self.srcname, 'Prefactor', utils.fluxNorm(ResultDicDC['Prefactor']))
            if (Model_type == 'PowerLaw2') :
                utils.FreezeParams(Fit, self.srcname, 'Integral', utils.fluxNorm(ResultDicDC['Integral']))
            LogL0.append(-Fit.fit(0,optimizer=CurConfig['fitting']['optimizer']))

        Can = _GetCanvas()
        TgrDC = ROOT.TGraph(len(Time),np.array(Time),np.array(LogL0))
        TgrDC.Draw("ALP*")
        # presumably the red overlay is meant to show the free fit (LogL1)
        TgrFree = ROOT.TGraph(len(Time),np.array(Time),np.array(LogL1))
        TgrFree.SetMarkerColor(2)
        TgrFree.Draw("PL*")
        #Save the canvas in the LightCurve subfolder
        Can.Print(LcOutPath+'_VarIndex.eps')
        Can.Print(LcOutPath+'_VarIndex.C')
        print 
        print "TSvar = ",2*(sum(LogL1)-sum(LogL0))
        print "NDF = ",len(LogL0)-1
        print "Chi2 prob = ",ROOT.TMath.Prob(2*(sum(LogL1)-sum(LogL0)),len(LogL0)-1)
Example #6
File: RunGTlike.py Project: hombit/enrico
def Analysis(folder, config, tag="", convtyp='-1', verbose = 1):
    """ run an analysis"""
    Obs = Observation(folder, config, convtyp, tag=tag)
    if verbose:
        utils._log('SUMMARY: ' + tag)
        Obs.printSum()
    FitRunner = FitMaker(Obs, config)  # the class that drives the fit
    if config['Spectrum']['FitsGeneration'] == 'yes':
        FitRunner.GenerateFits()  # generates fits files
    return FitRunner
Example #7
 def single_trans_valid(self, x_iter):
     total_trans = []
     total_avg_merge_rate, total_sent_num = 0., 0
     for idx, line in enumerate(x_iter):
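         # drop padding token id 0 before translating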
         s_filter = filter(lambda x: x != 0, line)
         avg_merges, trans = self.trans_onesent(s_filter)
         total_avg_merge_rate += avg_merges
         total_sent_num += 1
         total_trans.append(trans)
         if numpy.mod(idx + 1, 10) == 0:
             _log('Sample {} Done'.format((idx + 1)))
     _log('Done ...')
     return total_avg_merge_rate / total_sent_num, '\n'.join(total_trans)
Example #8
    def calc_size(self, scalper):
        leverage = self.updateLeverage(scalper)
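        # spend marginBatch percent of the configured margin, scaled by leverage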
        cost = (scalper['marginBatch'] / 100) * scalper['margin'] * leverage
        client = self.initConnector(scalper)
        if scalper['side'] == "BUY":
            size = round(cost / float(client.getBBO()['asks'][0][0]), 3)
        elif scalper['side'] == "SELL":
            size = round(cost / float(client.getBBO()['bids'][0][0]), 3)

        # log the sizing inputs for debugging
        _log(
            f"size: {size},leverage: {leverage}, marginBatch: {scalper['marginBatch']}, cost: {cost}, scalper: {scalper['name']}"
        )
        return size, cost
Example #9
    def translate(self, queue, rqueue, pid):

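        # worker loop: a None job is the poison pill telling this process to exit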
        while True:
            req = queue.get()
            if req is None:
                break

            idx, src = req[0], req[1]
            _log('{}-{}'.format(pid, idx))
            s_filter = filter(lambda x: x != 0, src)
            _, trans = self.trans_onesent(s_filter)

            rqueue.put((idx, trans))

        return
Example #10
 def apply_score(self, input, v_part=None, drop=False):
     # input: (trg_sent_len, batch_size, n_out) for training
     #        (1 or beamsize, n_out) for decoding
     # W0:    (n_out, voc_size)
     if drop and self.drop_rate > 0.:
         # scale activations to compensate for train-time dropout
         input = input * (1 - self.drop_rate)
     if self.use_mv:
         _log(" using batch-level voc. logit[v]")
         W_tran = T.transpose(self.W0)
         # self.b needs broadcasting over the selected vocabulary slice
         logit = theano.dot(input, T.transpose(
             W_tran[v_part])) + self.b[v_part]
         # logit.shape: array([1, 3]) or array([80, 3]) if input.shape[0]==80
     else:
         _log(" using full output voc.")
         logit = theano.dot(input, self.W0) + self.b
     if logit.ndim == 3:
         logit = logit.reshape(
             [logit.shape[0] * logit.shape[1], logit.shape[2]])
     # when training, the logit.ndim is 3 (trg_sent_len, batch_size, voc_size)
     # when decoding, the logit.ndim is 2 (1 or beam_size, voc_size)
     return logit
Example #11
    def updateOrder(self, **kwargs):
        scalper = kwargs['scalper']
        if scalper['status'] == "LIVE":
            docs = {
                "name": scalper['name'],
                "orderId": kwargs['response']['clientOrderId'],
                "timestamp": utc_timestamp(),
                "side": kwargs['response']['side'],
                "symbol": kwargs['response']['symbol'],
                "entryPrice": kwargs['entryPrice'],
                "exit": kwargs['exit'],
                "qty": kwargs['response']['origQty'],
                "cost": kwargs['cost'],
                "pnl": kwargs['pnl'],
                "paper_pnl": kwargs['paper_pnl']
            }
            self.liveId.insert_one(docs)
            _log(f"newOrder: {docs['orderId']} inserted into liveId collection",
                 "info")

        elif scalper['status'] == "DEMO":
            docs = {
                "name": scalper['name'],
                "orderId": kwargs['orderId'],
                "timestamp": utc_timestamp(),
                "side": scalper['side'],
                "symbol": scalper['symbol'],
                "entryPrice": kwargs['entryPrice'],
                "exit": kwargs['exit'],
                "qty": kwargs['size'],
                "cost": kwargs['cost'],
                "pnl": kwargs['pnl'],
                "paper_pnl": kwargs['paper_pnl']
            }
            self.demoId.insert_one(docs)
            _log(f"newOrder: {docs['orderId']} inserted into demoId collection",
                 "info")

        return
Example #12
    def trans_samples(self, srcs, trgs):
        for index in range(len(srcs)):
            # drop padding token id 0 from the source/target index arrays (ndarray -> list)
            s_filter = filter(lambda x: x != 0, srcs[index])
            _log('\n[{:3}] {}'.format('src', _index2sentence(s_filter, self.svcb_i2w)))
            t_filter = filter(lambda x: x != 0, trgs[index])
            _log('[{:3}] {}'.format('ref', _index2sentence(t_filter, self.tvcb_i2w)))

            _, trans = self.trans_onesent(s_filter)

            _log('[{:3}] {}\n'.format('out', trans))
Example #13
    def cube_prune_trans(self, src_sent):

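        # decode one source sentence with cube pruning; maxlen is capped at twice the source length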
        self.translations = []
        src_sent = src_sent[0] if self.ifvalid else src_sent  # numpy ndarray

        self.ptv = numpy.asarray(src_sent[1],
                                 dtype='int32') if self.ifvalid else None
        np_src_sent = numpy.asarray(src_sent, dtype='int64')
        if np_src_sent.ndim == 1:  # x (5,)
            # x(5, 1), (src_sent_len, batch_size)
            np_src_sent = np_src_sent[:, None]

        src_sent_len = np_src_sent.shape[0]
        self.maxlen = 2 * src_sent_len  # x(src_sent_len, batch_size)

        s_im1, self.context, self.uh = self.fn_init(
            np_src_sent)  # np_src_sent (sl, 1), beam==1

        # (1, trg_nhids), (src_len, 1, src_nhids*2)
        init_beam(self.beam,
                  cnt=self.maxlen,
                  init_state=s_im1,
                  detail=False,
                  cp=True)

        best_trans, best_loss = self.cube_pruning()

        _log('@source[{}], translation(without eos)[{}], maxlen[{}], loss[{}]'.
             format(src_sent_len, len(best_trans), self.maxlen, best_loss))
        _log(
            'init[{}] nh[{}] na[{}] ns[{}] mo[{}] ws[{}] ps[{}] ce[{}]'.format(
                *self.lqc[0:8]))

        avg_merges = format(self.lqc[9] / self.lqc[8], '0.3f')
        _log('average merge count[{}/{}={}]'.format(self.lqc[9], self.lqc[8],
                                                    avg_merges))

        return _filter_reidx(self.bos_id, self.eos_id, best_trans,
                             self.tvcb_i2w, self.ifmv, self.ptv)
Example #14
def test_log_raise_on_unknown_level():
    with raises(utils.NoSuchLogLevel):
        utils._log('fake_service', 'fake_line', 'build', 'BOGUS_LEVEL')
Example #15
    def create_cube(self, bidx, eq_classes):
        # eq_classes: (score_im1, y_im1, hi, ai, loc_in_prevb) NEW

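        # one sub-cube per equivalence class; merged rows share an averaged previous state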
        cube = []
        cnt_transed = len(self.translations)
        for whichsubcub, leq_class in eq_classes.iteritems():  # sub cube

            each_subcube_rowsz = len(leq_class)
            self.prev_beam_ptrs += each_subcube_rowsz
            #print self.prev_beam_ptrs
            #if bidx >= 2 and self.prev_beam_ptrs > self.avg_bp_by_cur_step + 5:
            #    return cube

            score_im1_r0, s_im1_r0, y_im1, y_im2, y_im3, _ = leq_class[0]
            subcube = []
            subcube_line_cache = []
            _avg_si, _avg_hi, _avg_ai, _avg_scores_i = None, None, None, None
            _cube_lm_krank_ces_i, _cube_krank_scores_i = None, None

            if each_subcube_rowsz == 1:
                _avg_sim1 = s_im1_r0
                self.onerow_subcube_cnt += 1
            else:
                merged_score_im1 = [tup[0] for tup in leq_class]
                merged_sim1 = [tup[1] for tup in leq_class]
                np_merged_score_im1 = numpy.array(merged_score_im1,
                                                  dtype='float32')
                np_merged_sim1 = numpy.array(merged_sim1)
                # arithmetic mean
                _avg_sim1 = numpy.mean(np_merged_sim1, axis=0)

                # geometric mean , not work
                #_avg_sim1 = numpy.power(numpy.prod(np_merged_sim1, axis=0), 1.0 /
                #                        np_merged_sim1.shape[0])

                # harmonic mean
                #_avg_sim1 = np_merged_sim1.shape[0] / numpy.sum(1.0 / np_merged_sim1, axis=0)

                # weighted harmonic mean
                #assert(np_merged_sim1.shape[0] == np_merged_score_im1.shape[0])
                #_avg_sim1 = numpy.sum(np_merged_score_im1, axis=0) / numpy.sum(
                #    np_merged_score_im1[:,None,None] / np_merged_sim1, axis=0)

                # weighted mean
                #exp_score_im1 = numpy.exp(np_merged_score_im1 -
                #                                    numpy.max(np_merged_score_im1, axis=0))
                #softmax_score_im1 = exp_score_im1 / exp_score_im1.sum()
                #_avg_sim1 = numpy.sum(softmax_score_im1[:,None,None] * np_merged_sim1, axis=0)

                # quadratic mean, not work
                #_avg_sim1 = numpy.power(numpy.mean(numpy.power(np_merged_sim1, 2), axis=0),
                #                       1.0 / np_merged_sim1.shape[0])

                #
                # for tup in leq_class: watch the attention prob pi dist here ....

            if self.lm is not None and bidx >= 4:
                # TODO sort the row dimension by language model words distribution
                debug('sort by lm: -3 -2 -1 => {} {} {}'.format(
                    y_im3, y_im2, y_im1))
                if self.ngram == 2:
                    gram = [y_im1]
                elif self.ngram == 3:
                    gram = [y_im1] if y_im2 == -1 else [y_im2, y_im1]
                elif self.ngram == 4:
                    gram = [y_im1] if y_im3 == -1 and y_im2 == -1 else (
                        [y_im2, y_im1]
                        if y_im3 == -1 else [y_im3, y_im2, y_im1])
                else:
                    raise NotImplementedError

                lm_next_logps, next_ids = vocab_prob_given_ngram(
                    self.lm, gram, self.tvcb, self.tvcb_i2w)
                np_lm_next_neg_logps = -numpy.asarray(lm_next_logps)
                np_next_ids = numpy.asarray(next_ids)

                _next_krank_ids = part_sort(np_lm_next_neg_logps,
                                            self.k - cnt_transed)
                _cube_lm_krank_ces_i = np_lm_next_neg_logps[_next_krank_ids]
                _next_krank_wids = np_next_ids[_next_krank_ids]

                for idx in gram:
                    _log(idx if idx == -1 else self.tvcb_i2w[idx] + ' ',
                         nl=False)
                _log('=> ', nl=False)
                for wid in _next_krank_wids:
                    _log('{}({}) '.format(self.tvcb_i2w[wid],
                                          np_lm_next_neg_logps[wid]),
                         nl=False)
                _log('')
                self.pop_subcube_approx_cache.append(None)
            else:
                # TODO sort the row dimension by average scores
                debug('sort by average scores')
                _y_emb_im1, _avg_hi = self.fn_nh(y_im1, _avg_sim1)
                _, _avg_ai = self.fn_na(self.context, self.uh, _avg_hi)
                _avg_si = self.fn_ns(_avg_hi, _avg_ai)
                _avg_moi = self.fn_mo(_y_emb_im1, _avg_ai, _avg_si)
                _avg_scores_i = self.fn_pws(_avg_moi,
                                            self.ptv)  # the larger the better
                _avg_ces_i = self.fn_ce(_avg_scores_i).flatten()
                _next_krank_wids = part_sort(_avg_ces_i, self.k - cnt_transed)
                _cube_krank_scores_i = _cube_krank_ces_ith = _avg_ces_i[
                    _next_krank_wids]

                self.pop_subcube_approx_cache.append(
                    (_y_emb_im1, _avg_hi, _avg_ai, _avg_si, _avg_moi,
                     _avg_scores_i, _next_krank_wids, _cube_krank_ces_ith))
            self.push_subcube_approx_cache.append(None)

            # expand each row of the equivalence class into a cube line over the k-best next words
            for i, tup in enumerate(leq_class):
                subcube.append([
                    tup +
                    (_avg_sim1, None if _cube_lm_krank_ces_i is None else
                     _cube_lm_krank_ces_i[j], None if
                     _cube_krank_scores_i is None else _cube_krank_scores_i[j],
                     wid, i, j, whichsubcub, each_subcube_rowsz)
                    for j, wid in enumerate(_next_krank_wids)
                ])
                subcube_line_cache.append(None)

            cube.append(subcube)
            self.subcube_lines_cache.append(subcube_line_cache)

        # print the finished cube once all sub-cubes are built
        self.printCube(cube)

        return cube
Example #16
    def cube_pruning(self):

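        # grow the beam step by step, stopping early once k translations end with EOS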
        for bidx in range(1, self.maxlen + 1):

            eq_classes = OrderedDict()
            self.approx_items, self.cube_lines_mergeout = [], []
            self.merge(bidx, eq_classes)

            # create cube and generate next beam from cube
            cube = self.create_cube(bidx, eq_classes)

            if self.cube_prune(bidx, cube):
                _log('early stop! see {} samples ending with EOS.'.format(
                    self.k))
                avg_bp = format(self.locrt[0] / self.locrt[1], '0.3f')
                _log('average location of back pointers [{}/{}={}]'.format(
                    self.locrt[0], self.locrt[1], avg_bp))
                sorted_samples = sorted(self.translations,
                                        key=lambda tup: tup[0])
                best_sample = sorted_samples[0]
                _log('translation length(with EOS) [{}]'.format(
                    best_sample[-1]))
                for sample in sorted_samples:  # tuples
                    _log('{}'.format(sample))

                return back_tracking(self.beam, best_sample, False)

            self.beam[bidx] = sorted(self.beam[bidx], key=lambda tup: tup[0])
            debug('beam {} ----------------------------'.format(bidx))
            for b in self.beam[bidx]:
                debug('{}'.format(b))
                # debug('{}'.format(b[0:1] + b[2:]))
            # because P(f|abcd) is approximated as P(f|cd), the beam generated by
            # cube pruning may be out of order by loss, hence the re-sort above
            # (loss from low to high)

        # no early stop, back tracking
        avg_bp = format(self.locrt[0] / self.locrt[1], '0.3f')
        _log('average location of back pointers [{}/{}={}]'.format(
            self.locrt[0], self.locrt[1], avg_bp))
        if len(self.translations) == 0:
            _log('no early stop, no candidate ends with EOS; selecting from the '
                 'len-{} beam, may not end with EOS.'.format(self.maxlen))
            best_sample = (self.beam[self.maxlen][0][0],) + \
                self.beam[self.maxlen][0][2:] + (self.maxlen, )
            _log('translation length(with EOS) [{}]'.format(best_sample[-1]))
            return back_tracking(self.beam, best_sample, False)
        else:
            _log(
                'no early stop, not enough {} candidates end with EOS, selecting the best '
                'sample ending with EOS from {} samples.'.format(
                    self.k, len(self.translations)))
            sorted_samples = sorted(self.translations, key=lambda tup: tup[0])
            best_sample = sorted_samples[0]
            _log('translation length(with EOS) [{}]'.format(best_sample[-1]))
            for sample in sorted_samples:  # tuples
                _log('{}'.format(sample))
            return back_tracking(self.beam, best_sample, False)
Example #17
    def create_cube(self, bidx, eq_classes):
        # eq_classes: (score_im1, y_im1, hi, ai, loc_in_prevb) NEW

        cube = []
        cnt_transed = len(self.translations)
        for whichsubcub, leq_class in eq_classes.iteritems():  # sub cube

            each_subcube_rowsz = len(leq_class)

            score_im1_r0, s_im1_r0, y_im1, y_im2, y_im3, _ = leq_class[0]
            subcube = []
            subcube_line_mergeout = []
            _avg_si, _avg_hi, _avg_ai, _avg_scores_i = None, None, None, None
            _cube_lm_krank_ces_i, _cube_krank_scores_i = None, None

            if each_subcube_rowsz == 1:
                _avg_sim1 = s_im1_r0
            else:
                merged_sim1 = [tup[1] for tup in leq_class]
                _avg_sim1 = numpy.mean(numpy.array(merged_sim1), axis=0)
                # for tup in leq_class: watch the attention prob pi dist here ....

            if self.lm is not None and bidx >= 4:
                # TODO sort the row dimension by language model words distribution
                debug('sort by lm: -3 -2 -1 => {} {} {}'.format(
                    y_im3, y_im2, y_im1))
                if self.ngram == 2:
                    gram = [y_im1]
                elif self.ngram == 3:
                    gram = [y_im1] if y_im2 == -1 else [y_im2, y_im1]
                elif self.ngram == 4:
                    gram = [y_im1] if y_im3 == -1 and y_im2 == -1 else (
                        [y_im2, y_im1]
                        if y_im3 == -1 else [y_im3, y_im2, y_im1])
                else:
                    raise NotImplementedError

                lm_next_logps, next_wids = vocab_prob_given_ngram(
                    self.lm, gram, self.tvcb, self.tvcb_i2w)
                np_lm_next_logps = numpy.asarray(lm_next_logps)
                np_next_wids = numpy.asarray(next_wids)

                np_lm_next_neg_logps = -np_lm_next_logps
                _next_krank_ids = part_sort(np_lm_next_neg_logps,
                                            self.k - cnt_transed)
                _cube_lm_krank_ces_i = np_lm_next_neg_logps[_next_krank_ids]
                _next_krank_wids = np_next_wids[_next_krank_ids]

                for idx in gram:
                    _log(idx if idx == -1 else self.tvcb_i2w[idx] + ' ',
                         nl=False)
                _log('=> ', nl=False)
                for wid in _next_krank_wids:
                    _log('{}({}) '.format(self.tvcb_i2w[wid],
                                          np_lm_next_neg_logps[wid]),
                         nl=False)
                _log('')
                self.approx_items.append(None)
            else:
                # TODO sort the row dimension by average scores
                debug('sort by average scores')
                _y_emb_im1, _avg_hi = self.fn_nh(y_im1, _avg_sim1)
                _, _avg_ai = self.fn_na(self.context, self.uh, _avg_hi)
                _avg_si = self.fn_ns(_avg_hi, _avg_ai)
                _avg_moi = self.fn_mo(_y_emb_im1, _avg_ai, _avg_si)
                _avg_scores_i = self.fn_pws(_avg_moi,
                                            self.ptv)  # the larger the better
                _avg_scores_i_flat = _avg_scores_i.flatten()
                _next_krank_ids = part_sort(-_avg_scores_i_flat,
                                            self.k - cnt_transed)
                _next_krank_wids = _next_krank_ids
                _cube_krank_scores_i = _avg_scores_i_flat[_next_krank_wids]
                #_avg_ces_i = self.fn_ce(_avg_scores_i).flatten()
                #_cube_krank_scores_i = _avg_ces_i[_next_krank_wids]

                self.approx_items.append(
                    (_y_emb_im1, _avg_hi, _avg_ai, _avg_si, _avg_moi,
                     _avg_scores_i, _next_krank_wids))

            # expand each row of the equivalence class into a cube line over the k-best next words
            for i, tup in enumerate(leq_class):
                subcube.append([
                    tup +
                    (_avg_sim1, None if _cube_lm_krank_ces_i is None else
                     _cube_lm_krank_ces_i[j], None if
                     _cube_krank_scores_i is None else _cube_krank_scores_i[j],
                     wid, i, j, whichsubcub, each_subcube_rowsz)
                    for j, wid in enumerate(_next_krank_wids)
                ])
                subcube_line_mergeout.append(None)

            cube.append(subcube)
            self.cube_lines_mergeout.append(subcube_line_mergeout)

        # print created cube before generating current beam for debug ...
        debug(
            '\n################################ CUBE ################################'
        )
        nsubcube = len(cube)
        debug('MERGE => ', nl=False)
        for subcube_id in xrange(nsubcube):
            nmergings = len(cube[subcube_id])
            debug('{} '.format(nmergings), nl=False)
        debug('')
        for subcube_id in xrange(nsubcube):
            subcube = cube[subcube_id]
            nmergings = len(subcube)
            debug('Group: {} contains {} mergings:'.format(
                subcube_id, nmergings))
            for mergeid in xrange(nmergings):
                line_in_subcube = subcube[mergeid]
                first_item = line_in_subcube[0]
                score_im1, y_im1 = first_item[0], first_item[2]
                y_im1_w = None if y_im1 == -1 else self.tvcb_i2w[y_im1]
                debug('{}={}({: >7}) => '.format(y_im1, y_im1_w,
                                                 format(score_im1, '0.2f')),
                      nl=False)
                for cubetup in line_in_subcube:
                    wid = cubetup[-5]
                    lm_score = cubetup[-7]
                    model_score = cubetup[-6]
                    debug('{}={}({: >5}&+{: >5}={: >5}) | '.format(
                        wid, self.tvcb_i2w[wid],
                        None if lm_score is None else format(lm_score, '0.2f'),
                        None if model_score is None else format(
                            model_score, '0.2f'),
                        None if model_score is None else format(
                            score_im1 + model_score, '0.2f')),
                          nl=False)
                debug('')
        debug(
            '######################################################################'
        )

        return cube
Example #18
 def initMongo(self) -> MongoClient:
     # note: indexing the client by name actually yields a Database handle
     mclient = MongoClient("uri")["collection"]
     _log("mongoDB connection successful", "info")
     return mclient
Example #19
    def exit_trigger(self, scalper, lock):
        client = self.initConnector(scalper)
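        # gather all open (exit == False) orders for this scalper, then flatten the
        # position once cumulative PnL crosses the take-profit or stop-loss threshold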
        if scalper['status'] == "LIVE":
            orders = list(
                self.liveId.find(
                    {"$and": [{
                        'name': scalper['name']
                    }, {
                        'exit': False
                    }]}))
        if scalper['status'] == "DEMO":
            orders = list(
                self.demoId.find(
                    {"$and": [{
                        'name': scalper['name']
                    }, {
                        'exit': False
                    }]}))
        positionSide = scalper['positionSide']
        orderType = "MARKET"
        cumulative_margin = round(
            len(orders) * (scalper['marginBatch'] / 100) * scalper['margin'],
            3)
        cumulative_price = 0
        cumulative_size = 0
        cumulative_pnl = 0
        if len(orders) > 0:
            for order in orders:
                cumulative_price += float(order['entryPrice'])
                cumulative_size += float(order['qty'])

            average_price = cumulative_price / len(orders)

            if scalper['side'] == "BUY":
                cumulative_pnl = round(
                    cumulative_size *
                    (float(client.getBBO()['bids'][0][0]) - average_price) -
                    cumulative_size * (self.fee / 100), 2)

            if scalper['side'] == "SELL":
                cumulative_pnl = round(
                    cumulative_size *
                    (float(client.getBBO()['asks'][0][0]) - average_price) -
                    cumulative_size * (self.fee / 100), 2) * -1

        _log(
            f"totalSize: {cumulative_size}, marginUsed: {cumulative_margin}, totalPnl: {cumulative_pnl} scalper: {scalper['name']}"
        )

        cumulative_size = round(cumulative_size, 3)
        if (cumulative_pnl >= scalper['margin'] *
            (scalper['take_profit'] / 100)) or (
                cumulative_pnl <= -1 * (scalper['margin'] *
                                        (scalper['stop_loss'] / 100))):
            if scalper['side'] == "BUY":
                side = "SELL"
                if scalper['status'] == "LIVE":
                    response = client.post_order(scalper['symbol'],
                                                 cumulative_size, side,
                                                 positionSide, orderType)
                    try:
                        if response['status'] == "NEW":
                            for order in orders:
                                self.liveId.find_one_and_update(
                                    {"orderId": order['orderId']},
                                    {"$set": {
                                        'exit': True
                                    }})
                            self.updateTrades(scalper, cumulative_pnl,
                                              time.ctime())
                    except KeyError:
                        _log(response, "error")
                if scalper['status'] == "DEMO":
                    for order in orders:
                        self.demoId.find_one_and_update(
                            {"orderId": order['orderId']},
                            {"$set": {
                                'exit': True
                            }})
                self.initMongo().scalper.find_one_and_update(
                    {"name": scalper['name']}, {"$set": {
                        "marginUsed": 0
                    }})

            if scalper['side'] == "SELL":
                side = "BUY"
                if scalper['status'] == "LIVE":
                    response = client.post_order(scalper['symbol'],
                                                 cumulative_size, side,
                                                 positionSide, orderType)
                    try:
                        if response['status'] == "NEW":
                            for order in orders:
                                self.liveId.find_one_and_update(
                                    {"orderId": order['orderId']},
                                    {"$set": {
                                        'exit': True
                                    }})
                            self.updateTrades(scalper, cumulative_pnl,
                                              time.ctime())
                    except KeyError:
                        _log(response, "error")
                if scalper['status'] == "DEMO":
                    for order in orders:
                        self.demoId.find_one_and_update(
                            {"orderId": order['orderId']},
                            {"$set": {
                                'exit': True
                            }})
                self.initMongo().scalper.find_one_and_update(
                    {"name": scalper['name']}, {"$set": {
                        "marginUsed": 0
                    }})

            self.initMongo().scalper.find_one_and_update(
                {"name": scalper['name']},
                {"$inc": {
                    "performance": cumulative_pnl
                }})

        # release the lock on every path, as entry_trigger does
        lock.release()
        return
Example #20
    def entry_trigger(self, scalper, lock):
        client = self.initConnector(scalper)
        client.hedge_mode()
        positionSide = scalper['positionSide']
        orderType = "MARKET"
        ub, lb, zs = self.calc_zscore(scalper)
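        # zs is the current z-score; ub/lb are the upper/lower entry thresholds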
        _log(f"Status {scalper['name']} => lb: {lb} | zs: {zs} | ub: {ub}")
        if (scalper['side']
                == "BUY") and (scalper['marginUsed'] < scalper['margin']):
            if zs > ub:
                self.scalper.find_one_and_update({"name": scalper['name']},
                                                 {"$inc": {
                                                     "signal": 1
                                                 }})
                _log(f"Buy Triggered for {scalper['name']}")
                orderId = gen_uuid()
                size, cost = self.calc_size(scalper)
                marginBatch = (scalper['marginBatch'] /
                               100) * scalper['margin']
                if scalper['status'] == "LIVE":
                    response = client.post_order(scalper['symbol'], size,
                                                 scalper['side'], positionSide,
                                                 orderType)
                    try:
                        if response['status'] == "NEW":
                            _log(f"Live order executed for Id: {orderId}",
                                 'info')
                            entryPrice = client.getBBO()['asks'][0][0]
                            self.updateOrder(response=response,
                                             entryPrice=entryPrice,
                                             cost=cost,
                                             exit=False,
                                             pnl=0,
                                             paper_pnl=0,
                                             scalper=scalper)
                            self.initMongo().scalper.find_one_and_update(
                                {"name": scalper['name']},
                                {"$inc": {
                                    "marginUsed": marginBatch
                                }})
                    except KeyError:
                        _log(response, 'error')
                elif scalper['status'] == "DEMO":
                    entryPrice = client.getBBO()['asks'][0][0]
                    _log(f"Demo order executed for Id: {orderId}", 'info')
                    self.updateOrder(orderId=orderId,
                                     cost=cost,
                                     size=size,
                                     entryPrice=entryPrice,
                                     exit=False,
                                     pnl=0,
                                     paper_pnl=0,
                                     scalper=scalper)
                    self.initMongo().scalper.find_one_and_update(
                        {"name": scalper['name']},
                        {"$inc": {
                            "marginUsed": marginBatch
                        }})

        if (scalper['side']
                == "SELL") and (scalper['marginUsed'] < scalper['margin']):
            if zs < lb:
                self.scalper.find_one_and_update({"name": scalper['name']},
                                                 {"$inc": {
                                                     "signal": 1
                                                 }})
                _log(f"Sell Triggered for {scalper['name']}")
                orderId = gen_uuid()
                size, cost = self.calc_size(scalper)
                marginBatch = (scalper['marginBatch'] /
                               100) * scalper['margin']
                if scalper['status'] == "LIVE":
                    response = client.post_order(scalper['symbol'], size,
                                                 scalper['side'], positionSide,
                                                 orderType)
                    try:
                        if response['status'] == "NEW":
                            _log(f"Live order executed for Id: {orderId}",
                                 'info')
                            entryPrice = client.getBBO()['bids'][0][0]
                            self.updateOrder(response=response,
                                             entryPrice=entryPrice,
                                             cost=cost,
                                             exit=False,
                                             pnl=0,
                                             paper_pnl=0,
                                             scalper=scalper)
                            self.initMongo().scalper.find_one_and_update(
                                {"name": scalper['name']},
                                {"$inc": {
                                    "marginUsed": marginBatch
                                }})
                    except KeyError:
                        _log(response, "error")
                elif scalper['status'] == "DEMO":
                    entryPrice = client.getBBO()['bids'][0][0]
                    _log(f"Demo order executed for Id: {orderId}", 'info')
                    self.updateOrder(orderId=orderId,
                                     cost=cost,
                                     size=size,
                                     entryPrice=entryPrice,
                                     exit=False,
                                     pnl=0,
                                     paper_pnl=0,
                                     scalper=scalper)
                    self.initMongo().scalper.find_one_and_update(
                        {"name": scalper['name']},
                        {"$inc": {
                            "marginUsed": marginBatch
                        }})

        lock.release()
        return
Example #21
    switchs = [args.use_valid, args.use_batch, args.use_score, args.use_norm, args.use_mv,
               args.watch_adist, args.merge_way, args.ifapprox_dist, args.ifapprox_att,
               args.add_lmscore, args.ifsplit]
    valid_set = args.valid_set
    kl = args.m_threshold
    nprocess = args.n_process
    lmpath = args.lm_path if args.lm_path is not None else None
    ngram = args.ngram

    alpha = args.length_norm
    beta = args.cover_penalty

    dec_conf(switchs, beam_size, search_mode, kl, nprocess, lmpath, ngram, alpha, beta, valid_set)

    config = getattr(configurations, 'get_config_cs2en')()
    _log('init decoder ... ')
    trans = Translate(**config)
    _log('done')

    _log('build decode funcs: f_init f_nh f_na f_ns f_mo f_ws f_ps f_ce f_next f_emb ... ', nl=False)
    fs = trans.build_sample()
    _log('done')

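    # wrap a single target-word id and add a batch axis if the array is 1-D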
    y_im1 = [2]
    npy = np.asarray(y_im1)
    if npy.ndim == 1:
        x = npy[None, :]
    # context = np.random.sample((7, 1, 2048)).astype(np.float32)
    # s_im1 = np.random.sample((1, 1024)).astype(np.float32)
    debug('............. time testing ..............................')
    s = time.time()
Example #22
if __name__ == "__main__":
    import sys
    res = valid_bleu(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    _log(res)
Example #23
File: energybin.py Project: hombit/enrico
def PrepareEbin(Fit, FitRunner):
    """ Prepare the computation of spectral points in energy bins by
    i) removing the weak sources (TS<1) -- currently disabled below,
    ii) updating the config file (options and energy range)
    and saving it in a new ascii file,
    iii) changing the spectral model and saving it in a new xml file.
    A list of the ascii files is returned"""

    NEbin = int(FitRunner.config['Ebin']['NumEnergyBins'])

    config = FitRunner.config

    config['verbose'] ='no' #Be quiet
    #Replace the evt file with the fits file produced before
    #in order to speed up the production of the fits files
    config['file']['event'] = FitRunner.obs.eventfile
    #update the config to allow the fit in energy bins
    config['UpperLimit']['envelope'] = 'no' 
    config['Ebin']['NumEnergyBins'] = '0'#no new bin in energy!
    config['out'] = FitRunner.config['out'] + '/Ebin' + str(NEbin)
    config['Spectrum']['ResultPlots'] = 'no' #no SED plot/modelmap
    #copy the user's choice for the energy-bin computation
    config['Spectrum']['FitsGeneration'] = config['Ebin']['FitsGeneration'] 
    config['UpperLimit']['TSlimit'] = config['Ebin']['TSEnergyBins']
    tag = FitRunner.config['file']['tag']
    lEmax = np.log10(float(FitRunner.config['energy']['emax']))
    lEmin = np.log10(float(FitRunner.config['energy']['emin']))
    utils._log("Preparing submission of fit into energy bins")
    print("Emin = ", float(FitRunner.config['energy']['emin']),
          " Emax = ", float(FitRunner.config['energy']['emax']),
          " Nbins = ", NEbin)

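    # log-spaced bin edges: NEbin bins need NEbin + 1 edges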
    ener = np.logspace(lEmin, lEmax, NEbin + 1)
    os.system("mkdir -p " + config['out'])
    paramsfile = []

    srcname = FitRunner.config['target']['name']
    if config['UpperLimit']['TSlimit'] > Fit.Ts(srcname):
        _log('Re-optimize', False)
        print "An upper limit has been computed. The fit needs to be re-optimized"
        Fit.optimize(0)

#    utils.RemoveWeakSources(Fit,srcname)#remove source with TS<1 to be sure that MINUIT will converge

    Pref = utils.ApproxPref(Fit, ener, srcname)
    Gamma = utils.ApproxGamma(Fit, ener, srcname)

    Model_type = Fit.model.srcs[srcname].spectrum().genericName()
    # if the model is not PowerLaw : change the model
    if Model_type != 'PowerLaw':
        Fit.logLike.getSource(srcname).setSpectrum("PowerLaw")  # change the model

    for ibin in xrange(NEbin):#Loop over the energy bins
        E = utils.GetE0(ener[ibin + 1],ener[ibin])
        print "Submition # ", ibin, " at energy ", E
        #Update the model for the bin
        NewFitObject = ChangeModel(Fit, ener[ibin], ener[ibin + 1], srcname, Pref[ibin] ,Gamma[ibin])
        Xmlname = (config['out'] + "/" + srcname +
                    "_" + str(ibin) + ".xml")
        NewFitObject.writeXml(Xmlname)# dump the corresponding xml file
        config['file']['xml'] = Xmlname
        #update the energy bounds
        config['energy']['emin'] = str(ener[ibin])
        config['energy']['emax'] = str(ener[ibin + 1])
        config['file']['tag'] = tag + '_Ebin' + str(NEbin) + '_' + str(ibin)
        filename =  config['target']['name'] + "_" + str(ibin) + ".conf"
        paramsfile.append(filename)
        config.write(open(config['out'] + '/' + paramsfile[ibin], 'w'))  # save the config file in an ascii file

    return paramsfile
Example #24
    def original_trans(self, x):

        x = x[0] if self.ifvalid else x  # numpy ndarray
        # subdict set [0,2,6,29999, 333]
        self.ptv = numpy.asarray(
            x[1], dtype='int32') if self.ifvalid and self.ifmv else None

        # k is the beam size we have
        x = numpy.asarray(x, dtype='int64')
        if x.ndim == 1:
            x = x[None, :]
        src_sent_len = x.shape[1]
        maxlen = src_sent_len * 2
        x = x.T

        sample = []
        sample_score = []

        live_k = 1
        dead_k = 0
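        # live_k: hypotheses still being extended; dead_k: hypotheses finished with eos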

        hyp_samples = [[]] * live_k
        hyp_scores = numpy.zeros(live_k).astype('float32')
        hyp_states = []

        # get initial state of decoder rnn and encoder context
        s_im1, ctx0, c_x0 = self.fn_init(x)
        y_im1 = [-1]  # indicator for the first target word (bos target)

        for ii in xrange(maxlen):
            # (src_sent_len, 1, 2*src_nhids) -> (src_sent_len, live_k, 2*src_nhids)
            ctx = numpy.tile(ctx0, [live_k, 1])
            debug('ctx')
            debug(ctx)
            c_x = numpy.tile(c_x0, [live_k, 1])
            debug('y_im1.................................................')
            debug(y_im1)
            debug('s_im1.................................................')
            debug(s_im1)
            yemb_im1, hi = self.fn_nh(y_im1, s_im1)
            debug('hi.................................................')
            debug(hi)
            pi, ai = self.fn_na(ctx, c_x, hi)
            debug('pi.................................................')
            debug(pi)
            debug('ai.................................................')
            debug(ai)
            s_im1 = s_i = self.fn_ns(hi, ai)  # note, s_im1 should be updated!
            debug('si')
            debug(s_i)
            mo = self.fn_mo(yemb_im1, ai, s_i)
            next_scores = self.fn_pws(mo, self.ptv)  # the larger the better

            next_ces = -next_scores if self.ifscore else self.fn_ce(
                next_scores)
            #cand_scores = hyp_scores[:, None] - numpy.log(next_scores)
            cand_scores = hyp_scores[:, None] + next_ces
            debug(str(ii) + ' ===============================================')
            debug('ce... i')
            debug(next_ces)
            cand_flat = cand_scores.flatten()
            # ranks_flat = cand_flat.argsort()[:(k-dead_k)]
            # we do not need to generate k candidate here, because we just need to generate k-dead_k
            # more candidates ending with eos, so for each previous candidate we just need to expand
            # k-dead_k candidates
            ranks_flat = part_sort(cand_flat, self.k - dead_k)
            # print ranks_flat, cand_flat[ranks_flat[1]], cand_flat[ranks_flat[8]]

            voc_size = next_scores.shape[1]
            trans_indices = ranks_flat // voc_size
            word_indices = ranks_flat % voc_size
            costs = cand_flat[ranks_flat]
            debug('ce... prev i')
            debug(costs)

            new_hyp_samples = []
            new_hyp_scores = numpy.zeros(self.k - dead_k).astype('float32')
            new_hyp_states = []

            for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
                new_hyp_samples.append(hyp_samples[ti] + [wi])
                new_hyp_scores[idx] = copy.copy(costs[idx])
                new_hyp_states.append(copy.copy(
                    s_i[ti]))  # here should be s_i !!!

            # check the finished samples
            new_live_k = 0
            hyp_samples = []
            hyp_scores = []
            hyp_states = []
            # scan the current beam; hypotheses ending with eos move to the finished samples
            for idx in xrange(len(new_hyp_samples)):
                if new_hyp_samples[idx][-1] == self.eos_id:
                    sample.append(new_hyp_samples[idx])
                    sample_score.append(new_hyp_scores[idx])
                    # print new_hyp_scores[idx], new_hyp_samples[idx]
                    dead_k += 1
                else:
                    new_live_k += 1
                    hyp_samples.append(new_hyp_samples[idx])
                    hyp_scores.append(new_hyp_scores[idx])
                    hyp_states.append(new_hyp_states[idx])
            hyp_scores = numpy.array(hyp_scores)
            live_k = new_live_k
            debug('hyp_scores... prev i')
            debug(hyp_scores)
            debug('hyp_samples... prev i')
            for hyp_sample in hyp_samples:
                debug(hyp_sample)

            if new_live_k < 1:
                break
            if dead_k >= self.k:
                break

            y_im1 = numpy.array([w[-1] for w in hyp_samples])
            s_im1 = numpy.array(hyp_states)

        if live_k > 0:
            for idx in xrange(live_k):
                sample.append(hyp_samples[idx])
                sample_score.append(hyp_scores[idx])

        lengths = numpy.array([len(s) for s in sample])
        if self.ifnorm:
            avg_sample_score = sample_score / lengths
        else:
            avg_sample_score = sample_score
        sidx = numpy.argmin(avg_sample_score)

        best_sum_loss = sample_score[sidx]
        best_avg_loss = avg_sample_score[sidx]
        best_trans = sample[sidx]

        _log(
            '@source length[{}], translation length(with eos)[{}], maxlen[{}], avg loss'
            '[{}]={}/{}'.format(src_sent_len, len(best_trans), maxlen,
                                avg_sample_score[sidx], sample_score[sidx],
                                lengths[sidx]))
        _log('init[{}] nh[{}] na[{}] ns[{}] mo[{}] ws[{}] ps[{}] p[{}]'.format(
            *self.lqc))
        return _filter_reidx(self.bos_id, self.eos_id, best_trans,
                             self.tvcb_i2w, self.ifmv, self.ptv)