Example No. 1
import multiprocessing
import time
from functools import partial

import numpy as np
from pathos.threading import ThreadPool  # the pathos ThreadPool, as in Example No. 4

# para_data_allo_1 (the per-block estimation routine) is defined elsewhere in the project.


def GMM_Ineq_parall(Theta0, DATA_STRUCT, d_struct):
    Theta = {
        "comm_mu": Theta0[0],
        "priv_mu": Theta0[1],
        "epsilon_mu": Theta0[2],
        "comm_var": Theta0[3],
        "priv_var": Theta0[4],
        "epsilon_var": Theta0[5],
    }

    rng = np.random.RandomState(d_struct['rng_seed'])

    start = time.time()

    print('--------------------------------------------------------')
    print('current parameter set is:')
    print(Theta)
    '''
    two levels of parallelism:
        split the data into blocks
        run the estimation on each block
    '''
    data_n = len(DATA_STRUCT)

    work_pool = ThreadPool(nodes=data_n)

    cpu_num = multiprocessing.cpu_count()

    cpu_num_node = int((cpu_num - 1) / data_n)
    # use amap (asynchronous map) so that all data blocks run at once
    results = work_pool.amap(
        partial(para_data_allo_1, Theta, cpu_num_node, rng, d_struct),
        iter(DATA_STRUCT))
    work_pool.close()
    while not results.ready():
        time.sleep(5)
        print(".")


#    work_pool.join()

    auction_result = np.nanmean(list(results.get()))

    end = time.time()

    print("object value : " + str(auction_result))
    print("time spend in this loop: ")
    print(end - start)
    print('--------------------------------------------------------\n')

    ## save the parameters and objective value

    with open('para.txt', 'a+') as f:
        for item in Theta0:
            f.write("%f\t" % item)

        f.write("%f\t" % auction_result)
        f.write("%f\n" % (end - start) / 60)

    return auction_result
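
The call below is a usage sketch, not part of the original listing: it shows the kind of derivative-free optimizer this objective is typically handed to. load_auction_blocks, the d_struct contents, the starting values and the choice of Nelder-Mead are all placeholders or assumptions.

from scipy.optimize import minimize

theta0 = [0.5, 0.5, 0.5, 1.0, 1.0, 1.0]      # means and variances, in the order unpacked above
d_struct = {'rng_seed': 42}                  # only 'rng_seed' is read directly by GMM_Ineq_parall
DATA_STRUCT = load_auction_blocks()          # hypothetical loader returning a list of data blocks

res = minimize(GMM_Ineq_parall, theta0,
               args=(DATA_STRUCT, d_struct),
               method='Nelder-Mead')
print(res.x, res.fun)
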
Example No. 2
            MRH = []
            MRT = []  # accumulator for the tail ranks collected below
            skip_rate = int(evalsubset_relations.shape[0]/BATCH_EVAL)
            for j in xrange(0, skip_rate):
                eval_batch_h = evalsubset_relations[j::skip_rate,0]
                eval_batch_r = evalsubset_relations[j::skip_rate,1] 
                eval_batch_t = evalsubset_relations[j::skip_rate,2] 
                assert eval_batch_h.shape[0]==BATCH_EVAL
                
                indexes_t = sess.run(indices_t,
                                     feed_dict={
                                         pos_h: eval_batch_h,
                                         pos_r: eval_batch_r,
                                         eval_to_rank: xrange(VOCABULARY_SIZE),
                                         keep_prob: 1,
                                     })
                mrt = Evaluate_MR(eval_batch_t.tolist(), indexes_t.tolist(),P)
                MRT.extend(mrt)
                
            if not os.path.exists(LOG_DIR):
                os.makedirs(LOG_DIR)
            saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"), epoch)                
            with open(LOG_DIR + '/progress.txt', 'a+') as fp:
                fp.write('Epoch %i: Minibatch MRT: %f\n\n' % (epoch, np.mean(MRT)))

        NOW_DISPLAY = False
        step += 1
        
P.close()
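
Evaluate_MR is not shown in this fragment; the sketch below is a hypothetical stand-in based only on how it is called above: for each ground-truth tail id it returns the 1-based position of that id inside the model's ranked candidate list. The pool argument P is accepted but ignored here.

def Evaluate_MR(true_ids, ranked_ids, P=None):
    # rank of each ground-truth id within the ranked candidate ids for its example
    return [ranked.index(t) + 1 for t, ranked in zip(true_ids, ranked_ids)]
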
Example No. 3
def backtest_mt(params):
    global capital
    su = None
    saveIndicators(candleamount=candleamount)
    #fix later
    candleSplice = candleData.tail(candleamount)

    atrseries = pd.Series(dtype=np.uint16)
    keltner_signals = pd.Series(dtype=object)
    engulf_signals = pd.Series(dtype=object)
    signals = pd.DataFrame(columns=['S'])
    atrperiod = params['atrperiod']
    #candleSplice = candleSplice.reset_index(drop=True)

    if (params['keltner'] == True) and (params['engulf'] == True):
        engulf_signals = pd.read_csv(
            'IndicatorData//' + params['symbol'] + '//Engulfing//' +
            "SIGNALS_t" + str(params['engulfthreshold']) + '_ignoredoji' +
            str(params['ignoredoji']) + '.csv',
            sep=',')
        keltner_signals = pd.read_csv('IndicatorData//' + params['symbol'] +
                                      '//Keltner//' + "SIGNALS_kp" +
                                      str(params['kperiod']) + '_sma' +
                                      str(params['ksma']) + '.csv',
                                      sep=',')
        signals = pd.concat([engulf_signals, keltner_signals], axis=1)
        signals.columns = ["E", "K"]
        signals['S'] = np.where((signals['E'] == signals['K']), Signal(0),
                                signals['E'])
    elif (params['keltner'] == True):
        keltner_signals = pd.read_csv('IndicatorData//' + params['symbol'] +
                                      '//Keltner//' + "SIGNALS_kp" +
                                      str(params['kperiod']) + '_sma' +
                                      str(params['ksma']) + '.csv',
                                      sep=',')
        signals['S'] = np.array(keltner_signals).reshape(
            1, len(keltner_signals))[0]
    elif (params['engulf'] == True):
        engulf_signals = pd.read_csv(
            'IndicatorData//' + params['symbol'] + '//Engulfing//' +
            "SIGNALS_t" + str(params['engulfthreshold']) + '_ignoredoji' +
            str(params['ignoredoji']) + '.csv',
            sep=',')
        signals['S'] = np.array(engulf_signals).reshape(
            1, len(engulf_signals))[0]
    print(signals['S'])
    #signals.to_csv('BacktestData//Signals//' + currentTime + '.csv')
    atrseries = pd.read_csv('IndicatorData//' + params['symbol'] + "//ATR//" +
                            "p" + str(atrperiod) + '.csv',
                            sep=',')
    copyIndex = candleSplice.index
    candleSplice = candleSplice.reset_index(drop=True)
    #candleSplice.merge(atrseries, left_index=True)
    #candleSplice.merge(signals['S'], right_on='S', left_index=True)
    candleSplice = pd.DataFrame.join(candleSplice, atrseries)
    candleSplice = pd.DataFrame.join(
        candleSplice, signals['S'])  #COMBINE SIGNALS AND CANDLE DATA
    candleSplice.index = copyIndex
    candleSplice['timestamp'] = pd.to_datetime(candleSplice.timestamp)
    finalCapitalData = None
    currentTime = datetime.now().strftime("%Y%m%d-%H%M")
    backtestDir = (params['symbol'] + '//' + "len" + str(candleamount) +
                   "_k" + str(params['keltner']) + "_e" + str(params['engulf']) +
                   "_id" + str(params['ignoredoji']) + "_eThrs" + str(params['engulfthreshold']) +
                   "_ATR" + str(params['atrperiod']) + "_kP" + str(params['kperiod']) +
                   "_kSMA" + str(params['ksma']) + "_pm" + str(params['posmult']) +
                   "_ST" + params['stoptype'] + "_sm" + str(params['stopmult']) +
                   "_tm" + str(params['tmult']) + "_TR" + params['trade'])

    bt_profit = 0

    start = time.time()  # started here so the elapsed-time print at the end works even when percision == 1
    if (percision != 1):
        isafe = []
        candleSplit = []
        initialLength = len(candleSplice)
        firstStart = candleSplice.index[0]
        lastDistanceSafe = None
        if params['symbol'] == 'XBTUSD':
            su = xbtusd_su
        elif params['symbol'] == 'ETHUSD':
            su = ethusd_su
        for i in range(percision - 1):
            #abs() is a temporary fix to running the backtest on short intervals
            isafe.append((i + 1) *
                         ((abs(initialLength - percision * su)) / percision) +
                         i * su)
        #candleSplit = list(np.array_split(candleSplice, percision))
        #candleSplit = list(candleSplit)
        for i in isafe:
            ia = int(i)
            if isafe.index(i) != 0:
                candleSplit.append(candleSplice.iloc[int(isafe[isafe.index(i) -
                                                               1]):ia + 1])
            lastDistanceSafe = ia
            #print("lds", lastDistanceSafe)
        # else:
        #candleSplit.append(candleSplice.iloc[:ia+1])
        #print("lds", lastDistanceSafe)
        #if(len(isafe) > 1):
        candleSplit.append(candleSplice.iloc[lastDistanceSafe:])

        #print(candleSplit)
        #time.sleep(100)
        #generate parameters for multithreading
        safe_length = len(candleSplit)
        safe_candleamount = np.repeat(candleamount, safe_length).tolist()
        safe_capital = np.repeat(capital, safe_length).tolist()
        safe_params = np.repeat(params, safe_length).tolist()

        withSafe = np.repeat(True, safe_length).tolist()

        print("safe thread amount:", safe_length)
        #create multithread pool
        start = time.time()
        #print(candleSplit)
        #time.sleep(1000)
        pool = ThreadPool(safe_length)

        #run initial chunks multithreaded to find safepoints
        safe_results = pool.uimap(backtest_strategy, safe_candleamount,
                                  safe_capital, safe_params, candleSplit,
                                  withSafe)

        pool.close()  #Compute anything we need to while threads are running
        candleSafe = []
        final_length = safe_length + 2
        withoutSafe = np.repeat(False, final_length).tolist()
        final_candleamount = np.repeat(candleamount, final_length).tolist()
        final_capital = np.repeat(capital, final_length).tolist()
        final_params = np.repeat(params, final_length).tolist()
        static_capital = capital

        safePoints = list(safe_results)  # blocks until every safe-point chunk has finished
        #time.sleep(1000)
        pool.join()

        for i in safePoints:
            if i == -1:
                backtest_mt.q.put(
                    'Not all safe points found for the given precision. Reduce precision or increase the timeframe'
                )
                return
        safePoints = sorted(safePoints)

        if find_su:
            su = []
            for i, point in enumerate(safePoints):
                su.append(point - candleSplit[i].index[0])
            suAvg = mean(su)
            #only works on evenly spliced chunks
            chunkLength = len(candleSplit[0])
            backtest_mt.q.put(["su average:", suAvg, ' / ', chunkLength])
            return (su)

        print("safe points:", safePoints)
        idx = 0
        for i in safePoints:
            ia = i - firstStart
            idx = safePoints.index(i)
            if safePoints.index(i) != 0:
                candleSafe.append(candleSplice.iloc[lastDistanceSafe - idx:ia +
                                                    1])
                lastDistanceSafe = ia + 1
            else:
                candleSafe.append(candleSplice.iloc[:ia + 1])
                lastDistanceSafe = ia + 1
        candleSafe.append(candleSplice.iloc[lastDistanceSafe - idx:])

        print("final thread amount:", final_length)
        #print(candleSafe)
        #time.sleep(10000)
        fpool = ThreadPool(final_length)
        final_results = fpool.uimap(backtest_strategy, final_candleamount,
                                    final_capital, final_params, candleSafe,
                                    withoutSafe)
        fpool.close()
        final_result = list(final_results)
        fpool.join()

        ordered_result = sorted(final_result, key=lambda x: x[0])
        for i in range(len(ordered_result)):
            #print(final_result.index)
            if i != 0:
                #for non-static position size:
                ##capital += capital*((i[1]-static_capital)/static_capital)
                ordered_result[i][1]['capital'] += bt_profit
                bt_profit = ordered_result[i][1].iloc[-1][
                    'capital'] - static_capital
                finalCapitalData = pd.concat(
                    [finalCapitalData, ordered_result[i][1]],
                    ignore_index=True)
            else:
                bt_profit = ordered_result[i][1].iloc[-1][
                    'capital'] - static_capital
                finalCapitalData = pd.DataFrame(ordered_result[i][1])
        capital = finalCapitalData['capital'].iloc[-1]
    else:
        #run chunks spliced by safepoints multithreaded to retrieve fully accurate results
        final_results = backtest_strategy(candleamount, capital, params,
                                          candleSplice, False)
        final_result = list(final_results)
        capital = str(final_result[1]['capital'].iloc[-1])
        finalCapitalData = final_result[1]

    print(finalCapitalData)
    #time.sleep(1000)
    visualize_trades(finalCapitalData, backtestDir)
    saveBacktest(capital, params, backtestDir)
    backtest_mt.q.put(capital)
    end = time.time()
    print("Thread time: ", end - start)
    return ('done')
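
A usage sketch, not from the source project: the dict below covers every params key backtest_mt reads, with placeholder values, and a plain queue.Queue stands in for whatever queue the project normally attaches as backtest_mt.q. The module-level names the function relies on (capital, candleData, candleamount, percision, find_su, backtest_strategy and the other helpers) are assumed to be defined elsewhere.

import queue

params = {
    'symbol': 'XBTUSD',
    'keltner': True,
    'engulf': True,
    'ignoredoji': False,
    'engulfthreshold': 0.5,
    'atrperiod': 14,
    'kperiod': 20,
    'ksma': 10,
    'posmult': 1,
    'stoptype': 'atr',
    'stopmult': 1.5,
    'tmult': 2,
    'trade': 'both',
}
backtest_mt.q = queue.Queue()   # the function reports progress and results through this attribute
backtest_mt(params)
print(backtest_mt.q.get())
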
Example No. 4
def make_patches(data_root, patches_root, patch_size, outline_filled=None, remove_filled=False, min_widths=('def',),
                 mirror=True, rotations=(0,), translations=((0, 0),), distinguishability_threshold=.5, num_workers=0,
                 random_samples=None, leave_width_percentile=None):
    if num_workers != 0:
        from pathos.multiprocessing import cpu_count, ProcessingPool
        from pathos.threading import ThreadPool
        if num_workers == -1:
            optimal_workers = cpu_count() - 1
            workers_pool = ProcessingPool(optimal_workers)
        else:
            workers_pool = ProcessingPool(num_workers)
        print(f'Workers pool: {workers_pool}')

        savers_pool = ThreadPool(1)
        saving_patches_in_bg = savers_pool.amap(lambda a: None, [])  # empty job so the first .get() below returns immediately
    else:
        workers_pool = 0

    path = lambda basename, origin, width='def', ori='def', rot=0, t=(0, 0): os.path.join(patches_root, basename,
                                                                                          '{}x{}'.format(*patch_size),
                                                                                          'width_{}'.format(width),
                                                                                          'orientation_{}'.format(ori),
                                                                                          'rotated_deg_{}'.format(rot),
                                                                                          'translated_{}_{}'.format(*t),
                                                                                          '{}_{}.svg'.format(*origin))

    orientations = ['def']
    if mirror:
        orientations.append('mir')

    if random_samples is not None:
        min_widths_all = deepcopy(min_widths)
        orientations_all = deepcopy(orientations)
        rotations_all = deepcopy(rotations)
        translations_all = deepcopy(translations)

    source_images = glob(os.path.join(data_root, '**', '*.svg'), recursive=True)
    for file in source_images:
        print('Processing file {}'.format(file))
        basename = file[len(data_root) + 1:-4]  # strip the data_root prefix and the '.svg' extension

        vector_image = VectorImage.from_svg(file)
        if remove_filled:
            vector_image.remove_filled()
        if outline_filled is not None:
            vector_image.leave_only_contours(outline_filled)
        if leave_width_percentile is not None:
            vector_image.leave_width_percentile(leave_width_percentile)

        if random_samples is not None:
            min_widths = np.random.choice(min_widths_all, size=min(random_samples, len(min_widths_all)), replace=False)
            orientations = np.random.choice(orientations_all, size=min(random_samples, len(orientations_all)),
                                            replace=False)
            rotations = np.random.choice(rotations_all, size=min(random_samples, len(rotations_all)), replace=False)
            translations = translations_all[
                np.random.choice(len(translations_all), size=min(random_samples, len(translations_all)), replace=False)]

        for width in min_widths:
            print('\twidth {}'.format(width))
            if width == 'def':
                vector_image_scaled = vector_image
            else:
                vector_image_scaled = vector_image.copy()
                vector_image_scaled.scale_to_width('min', width)
            for orientation in orientations:
                print('\t\torientation {}'.format(orientation))
                if orientation == 'def':
                    vector_image_reoriented = vector_image_scaled
                else:
                    vector_image_reoriented = vector_image_scaled.mirrored()
                for rotation in rotations:
                    print('\t\t\trotation {}'.format(rotation))
                    vector_image_rotated = vector_image_reoriented.rotated(rotation, adjust_view=True)
                    for translation in translations:
                        print('\t\t\t\ttranslation {}'.format(translation))
                        vector_image_translated = vector_image_rotated.translated(translation, adjust_view=True)

                        vector_patches = vector_image_translated.split_to_patches(patch_size, workers=workers_pool)
                        if num_workers != 0:
                            print('\t\t\t\t\twaiting for previous batch to be saved')
                            saving_patches_in_bg.get()

                        # default arguments bind the current loop values so background savers see the right ones
                        def simplify_and_save(vector_patch, basename=basename, width=width, orientation=orientation,
                                              rotation=rotation, translation=translation):
                            vector_patch.simplify_segments(distinguishability_threshold=distinguishability_threshold)
                            if len(vector_patch.paths) == 0:
                                return
                            save_path = path(basename,
                                             (int(vector_patch.x.as_pixels()), int(vector_patch.y.as_pixels())), width,
                                             orientation, rotation, translation)
                            os.makedirs(os.path.dirname(save_path), exist_ok=True)
                            vector_patch.save(save_path)

                        if num_workers == 0:
                            print('\t\t\t\t\tsaving patches')
                            for vector_path in vector_patches.reshape(-1):
                                simplify_and_save(vector_path)
                        else:
                            print('\t\t\t\t\tsaving patches')
                            saving_patches_in_bg = savers_pool.amap(simplify_and_save, vector_patches.reshape(-1))

    if num_workers != 0:
        workers_pool.close()
        workers_pool.join()
        workers_pool.clear()

        savers_pool.close()
        savers_pool.join()
        savers_pool.clear()
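
A usage sketch with placeholder paths and values, assuming the module-level imports the function relies on (glob, numpy, VectorImage, and so on) are present: it cuts 64x64 patches from every SVG under ./svg_data, also saving mirrored and 90-degree-rotated variants, using four worker processes for splitting and one background saver thread.

make_patches(
    data_root='svg_data',
    patches_root='patches',
    patch_size=(64, 64),
    min_widths=('def', 1),
    mirror=True,
    rotations=(0, 90),
    translations=((0, 0), (16, 16)),
    num_workers=4,
)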