Example #1
def rocket_launch_count():
    """Count launches by rocket type."""
    # Each row's 'rocket' field holds a nested dict; pull out the rocket name.
    rocket_info = launch_data[['rocket']].to_dict(orient='index')
    rocket_types = []
    for _, value in rocket_info.items():
        rocket_types.append(value['rocket']['rocket_name'])

    rocket_type_series = pandas.Series(rocket_types)
    rocket_type_counts = rocket_type_series.value_counts()
    return rocket_type_counts
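Examples #1 through #4 assume a global `launch_data` DataFrame of SpaceX launch records in which the `rocket` and `launch_site` columns hold nested dicts. A minimal stand-in like the following, with made-up rows covering only the fields the functions touch, is enough to exercise `rocket_launch_count()`:

import pandas

# Hypothetical stand-in for the real launch dataset; the values are
# invented and only the fields used by the examples are filled in.
launch_data = pandas.DataFrame({
    'rocket': [
        {'rocket_name': 'Falcon 9'},
        {'rocket_name': 'Falcon 9'},
        {'rocket_name': 'Falcon 1'},
    ],
    'launch_success': [True, False, True],
})

print(rocket_launch_count())
# Falcon 9    2
# Falcon 1    1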
Example #2
def successful_rocket_launch_count():
    """Count successful launches by rocket type."""
    # Keep only the rows where the launch succeeded.
    successful_launches_df = launch_data[launch_data['launch_success'] == True]
    successful_rocket_info = successful_launches_df[['rocket']].to_dict(
        orient='index')
    successful_rocket_types = []
    for _, value in successful_rocket_info.items():
        successful_rocket_types.append(value['rocket']['rocket_name'])

    successful_rocket_type_series = pandas.Series(successful_rocket_types)
    successful_rocket_type_counts = successful_rocket_type_series.value_counts()
    return successful_rocket_type_counts
Example #3
def rocket_launch_stats():
    """Plot the successful-launch percentage for each rocket type."""
    total_counts = rocket_launch_count()
    successful_counts = successful_rocket_launch_count()
    rocket_launch_averages = {}
    # Align the two value_counts() results by rocket name; a positional zip
    # would pair the wrong rockets whenever their sort orders differ.
    for rocket_name, rocket_count in total_counts.items():
        successful_rocket_count = successful_counts.get(rocket_name, 0)
        rocket_launch_averages[rocket_name] = round(
            successful_rocket_count / rocket_count * 100, 2)

    rocket_launch_averages_series = pandas.Series(rocket_launch_averages)
    rocket_launch_averages_plot = rocket_launch_averages_series.plot(
        kind='barh').get_figure()
    rocket_launch_stats = configure_graph(
        series_plot=rocket_launch_averages_plot,
        title='Successful Launch Averages by Rocket Name',
        x_label='Averages (%)',
        y_label='Name',
        count_values=list(rocket_launch_averages_series))
    return rocket_launch_stats
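`configure_graph` is not defined in these examples. A plausible sketch, assuming it only needs to title and label the figure and annotate each bar with its value (the signature is inferred from the calls above), could be:

def configure_graph(series_plot, title, x_label, y_label, count_values):
    """Hypothetical helper: title and label the bar chart, annotate bars."""
    axis = series_plot.axes[0]
    axis.set_title(title)
    axis.set_xlabel(x_label)
    axis.set_ylabel(y_label)
    # Write each count at the tip of its bar.
    for position, value in enumerate(count_values):
        axis.text(value, position, str(value), va='center')
    return series_plot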
Example #4
def site_usage():
    """Return a graph of how often each launch site was used."""
    launch_site_info = launch_data[['launch_site']].to_dict(orient='index')
    launch_site_locations = []

    for _, value in launch_site_info.items():
        launch_site_locations.append(value['launch_site']['site_name'])

    launch_site_locations_series = pandas.Series(launch_site_locations)
    launch_site_location_counts = launch_site_locations_series.value_counts()
    launch_site_location_count_plot = launch_site_location_counts.plot(
        kind='barh').get_figure()
    launch_site_location_count_list = list(launch_site_location_counts)
    launch_site_location_list_stats = configure_graph(
        series_plot=launch_site_location_count_plot,
        title='Number of Times Each Launch Site Was Used',
        x_label='Times Used',
        y_label='Site',
        count_values=launch_site_location_count_list)
    return launch_site_location_list_stats
Example #5
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split


def curva_promedio(ansem, up, variedad, edad):
    """Return the average-curve coefficient, or 0 when the key is missing."""
    try:
        return promedio[ansem, up, variedad, edad]['coeficiente']
    except KeyError:
        return 0
datos.ano_semana = datos.ano_semana.astype(str)

datos = datos.merge(semanas, on='ano_semana', how='left')
datos['mes_dato'] = datos.dia.dt.month

recons = datos.sort_values(['concatenado', 'edad']).reset_index(drop=True)

# Number of weekly lags of the coefficient to build as features.
lag_prod = 10

for i in tqdm(range(1, lag_prod + 1)):
    strprod = str(i) + 'sem_atras'
    strconc = str(i) + 'concat_atras'
    recons[strprod] = recons['coeficiente'].shift(i)
    recons[strconc] = recons['concatenado'].shift(i)
    # A lag is only valid when it comes from the same 'concatenado' group;
    # otherwise zero it out so rows are not contaminated across groups.
    vald = str(i) + 'valido'
    recons[vald] = recons.apply(
        lambda ff: 1 if ff[strconc] == ff['concatenado'] else 0, axis=1)
    recons[strprod] = recons.apply(
        lambda x: x[strprod] if x[vald] == 1 else 0, axis=1)
    recons.drop(columns=[strconc, vald], inplace=True)

recons.drop(columns=['Unnamed: 0'], inplace=True)
recons = recons.merge(colores[['variedad', 'Color']], how='left', on='variedad')

# Add the standard-curve column for each variety/farm.
recons.ano_semana = recons.ano_semana.astype(str)
recons['curva_metodo_finca'] = recons.apply(
    lambda x: curva_promedio(x['ano_semana'], x['finca'], x['variedad'], x['edad']),
    axis=1)
recons = recons[recons['tipo'].isin(['Minicarnation', 'Carnation'])]
recons['Color'] = recons['Color'].fillna('NoColor')
recons['edad^2'] = recons['edad'] ** 2
recons['edad^3'] = recons['edad'] ** 3


# Neural network
consolidado_rn = pd.DataFrame()
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
from tensorflow.keras import layers

# Keep only data from 2018 onwards.
recons = recons[recons['dia'] >= '2018-01-01']
# Placeholder column so the per-group predictions can be concatenated below.
y_hat_rn = pd.Series(name='y_hat_falso', dtype=float)

# recons_test (the hold-out set) is assumed to be built elsewhere with the
# same lag and curve columns as recons.
for i in recons.tipo.unique():
    for j in recons_test[recons_test['tipo'] == i]['Color'].unique():
        temp_test = recons_test[(recons_test['tipo'] == i)
                                & (recons_test['Color'] == j)]
        df_clean_test = pd.concat(
            [temp_test[['edad', 'edad^2', 'edad^3', 'mes_dato', '5sem_atras',
                        '6sem_atras', '7sem_atras', '8sem_atras', '9sem_atras', '10sem_atras',
                        # '11sem_atras', '12sem_atras', '13sem_atras', '14sem_atras', '15sem_atras',
                        'curva_metodo_finca', 'coeficiente']],
             pd.get_dummies(temp_test['variedad']),
             pd.get_dummies(temp_test['finca'])], axis=1)
        df_clean_test.fillna(value=0, inplace=True)
        y_real_test = df_clean_test.coeficiente
        X_real_test = df_clean_test.drop('coeficiente', axis=1)

        # Restrict the training rows to the varieties and farms present in
        # the test slice so the dummy columns line up.
        temp = recons[(recons['tipo'] == i) & (recons['Color'] == j)]
        temp = temp[temp['variedad'].isin(temp_test['variedad'].unique())]
        temp = temp[temp['finca'].isin(temp_test['finca'].unique())]
        df_clean = pd.concat(
            [temp[['edad', 'edad^2', 'edad^3', 'mes_dato', '5sem_atras',
                   '6sem_atras', '7sem_atras', '8sem_atras', '9sem_atras', '10sem_atras',
                   # '11sem_atras', '12sem_atras', '13sem_atras', '14sem_atras', '15sem_atras',
                   'curva_metodo_finca', 'coeficiente']],
             pd.get_dummies(temp['variedad']),
             pd.get_dummies(temp['finca'])], axis=1)

        df_clean.fillna(value=0, inplace=True)
        y = df_clean.coeficiente
        X = df_clean.drop('coeficiente', axis=1)
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.33, random_state=42)
        scaler = StandardScaler()
        X_train_std = pd.DataFrame(scaler.fit_transform(X_train),
                                   columns=X_train.columns)

        # Two hidden layers of 256 units; the single-unit output layer also
        # uses ReLU so predictions stay non-negative.
        neurons = 256
        model = keras.Sequential([
            layers.Dense(neurons, activation='relu',
                         input_shape=[len(X_train_std.columns)]),
            layers.Dense(neurons, activation='relu'),
            layers.Dense(1, activation='relu')])  # output layer
        model.compile(loss='mse', optimizer='adam')
        history = model.fit(X_train_std, y_train, epochs=100,
                            validation_split=0.2, verbose=0, batch_size=100)

        # Scale the test slice with the training scaler and predict.
        X_norm = scaler.transform(X_real_test)
        y_hat = model.predict(X_norm)
        y_hat = pd.Series(y_hat[:, 0], name='y_hat', index=X_real_test.index)
        y_hat_rn = pd.concat([y_hat_rn, y_hat], axis=1)

# Collapse the per-group prediction columns into a single column and join
# it back onto the test set.
y_hat_rn.drop(columns=['y_hat_falso'], inplace=True)
ser_y_hat = np.sum(y_hat_rn, axis=1)
y_hat_rn['y_hat_red_n'] = ser_y_hat
validacion_y_hat = y_hat_rn[['y_hat_red_n']]
validacion_final = pd.concat([recons_test, validacion_y_hat], axis=1)
                          
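Example #5 also assumes `datos`, `semanas`, `colores`, and `recons_test` are loaded elsewhere, along with the `promedio` lookup used by `curva_promedio`. One plausible way to build `promedio` so it can be indexed by a (year-week, farm, variety, age) tuple, given a hypothetical history DataFrame `historico` with the same column names:

# Hypothetical construction of the lookup used by curva_promedio: the mean
# coefficient per (ano_semana, finca, variedad, edad) combination, stored as
# {(ansem, up, variedad, edad): {'coeficiente': value}}.
promedio = (historico
            .groupby(['ano_semana', 'finca', 'variedad', 'edad'])[['coeficiente']]
            .mean()
            .to_dict(orient='index'))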
Example #6
def main():
    SetupLogger()
    logging.debug("now is %s", datetime.datetime.now())
    logging.getLogger().setLevel(logging.INFO)

    cmdLineParser = argparse.ArgumentParser("api tests")
    cmdLineParser.add_argument("-a",
                               "--account",
                               action="store",
                               type=str,
                               dest="account",
                               help="The account number to use")
    cmdLineParser.add_argument("-p",
                               "--port",
                               action="store",
                               type=int,
                               dest="port",
                               default=7497,
                               help="The TCP port to use")
    cmdLineParser.add_argument("-C",
                               "--global-cancel",
                               action="store_true",
                               dest="global_cancel",
                               default=False,
                               help="whether to trigger a globalCancel req")
    args = cmdLineParser.parse_args()
    print("Using args", args)
    logging.debug("Using args %s", args)

    # Be sure to read the requirements:
    #   https://interactivebrokers.github.io/tws-api/introduction.html#requirements
    # and the important TWS setup notes:
    #   https://interactivebrokers.github.io/tws-api/initial_setup.html
    try:
        bot1 = sincBot()
        # The client id can be random, or 0 for the master client.
        bot1.clientId = random.randint(1, 32)
        bot1.account = args.account  # Set this to your account number
        bot1.portNum = args.port  # Set this to the port you configured in TWS
        # Read the constraints in the API docs:
        # https://interactivebrokers.github.io/tws-api/historical_bars.html
        bot1.duration = '1 M'  # How far back to go
        bot1.barSize = '1 day'
        bot1.type = 'TRADES'
        bot1.timeStr = datetime.datetime.now().strftime(
            '%Y%m%d %H:%M:%S')  # From today

        bot1.reconnect()

        # Add interesting symbols here
        symbols = numpy.array(['AAPL', 'MSFT'])
        data = pandas.Series(symbols)
        bot1.start(data)  # This begins execution

        print(bot1.cache)

        bot1.stop()

        sys.exit(1)

    finally:
        bot1.dumpTestCoverageSituation()
        bot1.dumpReqAnsErrSituation()
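`SetupLogger` and `sincBot` come from the surrounding project and are not shown here. A minimal `SetupLogger` sketch, assuming it only needs to send DEBUG output to a log file and errors to the console (the filename and format are placeholders), might look like:

import logging

def SetupLogger():
    """Minimal sketch: DEBUG and above to a file, errors to the console."""
    logging.basicConfig(
        filename='api_tests.log',
        filemode='a',
        level=logging.DEBUG,
        format='%(asctime)s %(levelname)s %(funcName)s %(message)s')
    console = logging.StreamHandler()
    console.setLevel(logging.ERROR)
    logging.getLogger('').addHandler(console)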