Example #1
def response_performed(attackDetails, responseTechnique, techniquePath):
    #Send a message to server letting it know that the response technique is performed
    client.sock.send(
        serialize('ack', '{}!.!{}'.format('response performed',
                                          responseTechnique)))
    #The client waits for the server to finish the effectiveness test attack
    print('Waiting for the server to perform the effectiveness attack')
    client.FINISH_TESTING_EFFECTIVENESS.wait()
    print('Server done with effectiveness attack')
    #If the attack flag is still set, the response technique failed to stop the attack
    isEffective = not UNDER_ATTACK.is_set()
    #Clear flags
    UNDER_ATTACK.clear()
    client.FINISH_TESTING_EFFECTIVENESS.clear()
    #Load the saved file
    print('Loading the log file')
    logFile = read_data('data/logs/{}/{}.json'.format(attackDetails,
                                                      responseTechnique))
    #Add effective status to the json file
    logFile['is_effective'] = isEffective
    #Send the log so we don't keep the server waiting for log file, in case this was the last technique to be assessed
    print('Sending the log file')
    client.sock.send(
        send_log(
            'log', "{}!.!{}!.!{}!.!{}".format(
                specifications['software_version'],
                specifications['hardware_specifications'], attackDetails,
                responseTechnique), logFile))
    #After sending the log, delete the log file
    try:
        os.remove('data/logs/{}/{}.json'.format(attackDetails, responseTechnique))
    except OSError as error:
        print("Error while deleting file: ", error)

    #Revert changes done when the response technique applied
    print('Running revert operation')
    subprocess.run(shlex.split("{} {}".format(techniquePath, "revert")))
    print('Revert operation successful')
    #After getting back to the original state, change busy status to false
    specifications['is_busy'] = False
    #Save changes
    save_data('data/specifications.json', specifications)
    #Inform the server that the car is no longer busy
    print(
        'Sending busy status to server. Setting it to false (the car is ready for more work)'
    )
    client.sock.send(
        serialize('ack', '{}!.!{}'.format('client busy status', 'false')))
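
Note: UNDER_ATTACK and client.FINISH_TESTING_EFFECTIVENESS are not defined in these examples; from the way they are waited on and cleared above, they behave like threading.Event flags that a listener thread sets when the matching server messages arrive. A minimal sketch under that assumption (the listener and message names are hypothetical):

import threading

UNDER_ATTACK = threading.Event()  # set by the detector when an attack is observed

class Client:
    def __init__(self):
        # Set by the network listener once the server finishes its effectiveness test attack
        self.FINISH_TESTING_EFFECTIVENESS = threading.Event()

# response_performed() then calls .wait() to block until a flag is set
# and .clear() to re-arm it for the next assessment round.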
Example #2
def running_response_technique(attackDetails, responseTechnique, path):
    #A placeholder for log information.
    tempDict = {}
    tempDict['cpu_utilization'] = '100'

    print("Running {} from path {}".format(responseTechnique, path))
    if specifications['is_collaborative']:
        os.makedirs(os.path.dirname('data/logs/{}/{}.json'.format(
            attackDetails, responseTechnique)),
                    exist_ok=True)
        save_data(
            'data/logs/{}/{}.json'.format(attackDetails, responseTechnique),
            tempDict)

    #Run response technique
    subprocess.run(shlex.split("{} {}".format(path, "apply")))
Example #3
def log_received(data):
    print('A log has been received!')
    #Get contents between delimiters
    tempMessage = data.split(b'!:START:!')[-1].split(b'!:FINISH:!')[0]
    #Get the rest of file data
    tempFile = data.rsplit(b'!:FINISH:!', 1)[1]
    #Filter them out
    tempMessage = tempMessage.decode('utf-8')
    message = tempMessage.split('!.!')
    softwareVersion = message[0]
    hardwareSpecifications = message[1]
    attackDetails = message[2]
    responseTechnique = message[3]
    file = pickle.loads(tempFile)
    #Save the file
    filePath = 'data/logs/{}/{}/{}/{}.json'.format(softwareVersion, hardwareSpecifications, attackDetails, responseTechnique)
    save_data(filePath, file)
    #Update the log path in the response techniques data
    responseTechniquesData[softwareVersion][hardwareSpecifications][attackDetails][responseTechnique]['log_path'] = filePath
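
The send_log helper used in Example #1 is not shown here. Judging only from how log_received unpacks the incoming bytes above, it plausibly frames a header string between !:START:! and !:FINISH:! markers and appends the pickled log; a minimal sketch under that assumption (the real helper may differ):

import pickle

def send_log(messageType, header, logFile):
    # Hypothetical framing inferred from log_received(): message type, then the
    # header between the delimiters, then the pickled payload.
    framed = '{}!:START:!{}!:FINISH:!'.format(messageType, header).encode('utf-8')
    return framed + pickle.dumps(logFile)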
Example #4
def evaluate_techniques(self, responseTechniques):
    print("Evaluating techniques...")
    #If one technique is not evaluated, then start the evaluation process
    isEvaluationRequired = False
    for technique in responseTechniques:
        if not responseTechniques[technique]['is_evaluated']:
            isEvaluationRequired = True
            break
    #Otherwise, no need for evaluation
    if not isEvaluationRequired: return
    #Update response techniques database
    import glob
    src = "data/logs/{}/{}/{}/".format(self.softwareVersion, self.hardwareSpecifications, self.attackDetails)
    files = glob.glob('{}/*'.format(src), recursive=False)
    # Loop through files
    for single_file in files:
        json_file = read_data(single_file)
        fileName = os.path.basename(single_file)
        techniqueName = fileName.replace('.json', '')
        isEffective = json_file['is_effective']
        responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][techniqueName]['is_effective'] = isEffective

    #Add every effective technique and its duration    
    effectiveTechniques = {}
    for technique in responseTechniques:
        if responseTechniques[technique]['is_effective']: 
            effectiveTechniques[technique] = responseTechniques[technique]['duration']
    #Get the lowest duration technique
    bestResponseTechnique = min(effectiveTechniques, key=effectiveTechniques.get)
    print("Effective techniques are: ",  effectiveTechniques)
    print("Best effective response technique is {}.".format(bestResponseTechnique))
    #Modify as necessary
    responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][bestResponseTechnique]['is_most_efficient'] = True
    responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][bestResponseTechnique]['is_evaluated'] = True
    for technique in responseTechniques:
        if not technique == bestResponseTechnique:
            responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][technique]['is_most_efficient'] = False
            responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][technique]['is_evaluated'] = True

    #Save changes to the json file
    save_data('data/response_techniques_database.json', responseTechniquesData)
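
min(effectiveTechniques, key=effectiveTechniques.get) returns the dictionary key whose value is smallest, i.e. the effective technique with the shortest duration. A small illustration with hypothetical technique names:

effectiveTechniques = {'restart_service': 4.2, 'block_ip': 1.7, 'rotate_keys': 9.0}
best = min(effectiveTechniques, key=effectiveTechniques.get)
print(best)  # -> 'block_ip', the lowest-duration effective technique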
Example #5
def response_system(attackDetails):
    #Under attack state is being processed, clear it
    UNDER_ATTACK.clear()
    if not specifications['is_collaborative']:
        #After detecting the attack -> request evaluation from the server
        evaluationThread = threading.Thread(target=request_evaluation,
                                            args=(attackDetails,))
        evaluationThread.start()
        #Wait for the thread to finish
        evaluationThread.join()
        #Clear the response ack
        client.RESPONSE_ACK.clear()
        #Load the response system
        responseSystem = read_data('data/response_system.json')
        responseTechnique = responseSystem[attackDetails]
        techniquePath = responseTechniquesData[
            responseTechnique]  # The path for the technique script
        running_response_technique(attackDetails, responseTechnique,
                                   techniquePath)
    else:
        #Assign car to busy and save it locally
        specifications['is_busy'] = True
        save_data('data/specifications.json', specifications)
        #Load the response system
        responseSystem = read_data('data/response_system.json')
        responseTechnique = responseSystem[attackDetails]
        #Run the response technique applied by the response system
        print('Under {} attack! Running {}!'.format(attackDetails,
                                                    responseTechnique))
        techniquePath = responseTechniquesData[
            responseTechnique]  # The path for the technique script
        #Generate a new thread and run the response technique script
        responseThread = threading.Thread(
            target=running_response_technique,
            args=(attackDetails, responseTechnique, techniquePath))
        responseThread.start()
        #Wait for the thread to finish
        responseThread.join()
        response_performed(attackDetails, responseTechnique, techniquePath)
Example #6
def evaluation_requested(self, data):
    message = pickle.loads(data).split('!.!')
    self.attackDetails = message[0]
    self.responseTechniqueApplied = message[1]
    print('Evaluation requested by ID: {} for {}. The applied response technique is: {}.'.format(str(self.id), self.attackDetails, self.responseTechniqueApplied))
    #Start observer to wait for log arrivals
    #Monitor the directory and wait for the new file to arrive from the car
    logging.basicConfig(level=logging.ERROR)
    path = "data/logs/{}/{}/{}/".format(self.softwareVersion, self.hardwareSpecifications, self.attackDetails)
    os.makedirs(os.path.dirname(path), exist_ok=True)
    #Run observer to monitor log file changes
    observer = Observer()
    event_handler = FileEventHandler(observer)
    observer.schedule(event_handler, path, recursive=False)
    observer.start()
    #Load response techniques based on the software version, hardware specifications, and attack
    suitableTechniques = responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails]
    #Loop through the suitable techniques and assess un-assessed techniques
    assess_techniques(self, suitableTechniques)
    #Wait for the assessment to be done
    ASSESSMENT_FLAG.wait()
    #Clear the assessment flag
    ASSESSMENT_FLAG.clear()
    #Terminate the observer
    observer.stop()
    observer.join()
    
    #Save changes to the json file
    save_data('data/response_techniques_database.json', responseTechniquesData)

    #After assessing all un-assessed techniques, evaluate them
    evaluate_techniques(self, suitableTechniques)
    
    #After making sure that all techniques are assessed and evaluated, find the 
    # best response technique for that metric and send it as update message
    final_update(self, self.attackDetails, suitableTechniques)
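
FileEventHandler is not included in this example. Assuming it follows the usual watchdog pattern, it subclasses FileSystemEventHandler and reacts to newly created log files; a minimal sketch (whether it is this handler or assess_techniques() that sets ASSESSMENT_FLAG is an assumption):

from watchdog.events import FileSystemEventHandler

class FileEventHandler(FileSystemEventHandler):
    def __init__(self, observer):
        self.observer = observer

    def on_created(self, event):
        # A new log file has arrived from the car; wake up the waiting thread.
        if not event.is_directory:
            ASSESSMENT_FLAG.set()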
Example #7
def apply_update(attackDetails, responseTechnique):
    print('Update received! Applying: {} for: {}'.format(responseTechnique, attackDetails))
    #Read and update the response_system file
    responseSystem = read_data('data/response_system.json')
    responseSystem[attackDetails] = responseTechnique
    save_data('data/response_system.json', responseSystem)
Example #8
def exp(args):
    #Get Image Data
    x_train, y_train, x_test, y_test, prior_test = load_dataset(args.dataset)

    #Get Sets Sizes
    args.set_sizes = get_set_sizes(args.sets, len(x_train), args.set_size_gen)

    #Randomly Generate Priors Pi
    args.Pi = get_Pi(args.sets, args.Pi_gen)

    #Sample Data According to Pi and Sets Sizes
    U_sets, priors_corr = get_U_sets(args.sets, y_train, args.set_sizes,
                                     args.Pi)

    print('Data prepared!')
    print("set_sizes: " + str(args.set_sizes))
    print("test class prior: " + str(prior_test))
    print("Pi: " + str(args.Pi))

    # Get Model
    ExpModel = MultiLayerPerceptron(dataset=args.dataset,
                                    sets=args.sets,
                                    set_sizes=args.set_sizes,
                                    Pi=args.Pi,
                                    mode=args.mode,
                                    weight_decay=args.weightdecay)

    # Schedule Learning Rate if not specified
    if args.learningrate == -1:
        args.learningrate = lr_scheduler(args.dataset, args.sets, args.mode)

    # Get optimizer
    ExpModel.optimizer = Adam(args.optimizer,
                              lr=args.learningrate,
                              decay=args.lr_decay)

    # Build Model
    input_shape = x_train[0].shape
    ExpModel.build_model(priors_corr,
                         prior_test,
                         args.Pi,
                         input_shape,
                         mode=args.mode)

    #-----------------------------------------------------Start Training-----------------------------------------------------#
    history, loss_test = ExpModel.fit_model(U_sets=U_sets,
                                            x_train_total=x_train,
                                            batch_size=args.batchsize,
                                            epochs=args.epoch,
                                            x_test=x_test,
                                            y_test=y_test,
                                            Pi=args.Pi,
                                            priors_corr=priors_corr,
                                            prior_test=prior_test,
                                            mode=args.mode)
    np_loss_test = np.array(loss_test)
    np_loss_train = np.array(history['loss'])

    plot_curve(np_loss_test,
               args.epoch,
               label=args.mode,
               phase='test',
               dataset=args.dataset)
    plot_curve(np_loss_train,
               args.epoch,
               label=args.mode,
               phase='train',
               dataset=args.dataset)

    #---------------------------------------------Save files----------------------------------------------------------------#
    save_data(args, U_sets, priors_corr, prior_test, np_loss_train,
              np_loss_test)
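
exp() expects an args namespace carrying the attributes referenced above (dataset, sets, set_size_gen, Pi_gen, mode, weightdecay, learningrate, optimizer, lr_decay, batchsize, epoch). A hedged sketch of how such a namespace might be built with argparse; the defaults are placeholders, not the project's actual values:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='mnist')
parser.add_argument('--sets', type=int, default=10)
parser.add_argument('--set_size_gen', default='uniform')
parser.add_argument('--Pi_gen', default='random')
parser.add_argument('--mode', default='unbiased')
parser.add_argument('--weightdecay', type=float, default=1e-4)
parser.add_argument('--learningrate', type=float, default=-1)  # -1 -> use lr_scheduler()
parser.add_argument('--optimizer', default='adam')
parser.add_argument('--lr_decay', type=float, default=0.0)
parser.add_argument('--batchsize', type=int, default=128)
parser.add_argument('--epoch', type=int, default=100)
args = parser.parse_args()
exp(args)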
Example #9
        step=0
        for v in D:
            step+=1
            u=v #u is actually v0
            u=np.expand_dims(u,1)
            for t in range(k):
                h=sampleH(W,c,u)
                u=sampleU(W,b,h)
            
            v=np.expand_dims(v,axis=1)
            
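            # Contrastive-divergence (CD-k) update: move W, b and c toward the
            # statistics of the data sample v and away from those of the
            # reconstruction u produced by the k Gibbs steps above.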
            W += eta*(np.matmul(sigmoid(np.matmul(W,v)+c),v.T)-np.matmul(sigmoid(np.matmul(W,u)+c),u.T))
            b += eta*(v.astype(np.int8) -u.astype(np.int8))
            c += eta*(sigmoid(np.matmul(W,v)+c)-sigmoid(np.matmul(W,u)+c))
            loss+=np.sum(np.abs(v.astype(np.int8) -u.astype(np.int8)))
        print('epoch %d , loss %.4f' % (epoch, loss / len(D)))
        lost.append(loss/len(D))
    return W,b,c


# In[ ]:


W,b,c=RBM(200,training_data)

# In[ ]:


save_data([W,b,c],'weights.bin')

Example #10
def persistence():
    '''Persists data every 10 seconds'''
    while True:
        save_data()
        sleep(10)
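
persistence() blocks forever, so it is normally launched on its own thread. One common pattern (a sketch, not taken from this project) is a daemon thread so it does not keep the process alive on shutdown:

import threading

persistenceThread = threading.Thread(target=persistence, daemon=True)
persistenceThread.start()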
Example #11
File: model.py  Project: maigag/3DVAAEGAN
    def train(self, sess, X, Y, label_names, label_colors, label_map_path,
              sampleOutputPath):

        # load model in case
        if (self.loadModel > -1):
            print("LOAD THE MODEL . . .")
            self.saver.restore(sess,
                               './checkpoints/VAE_GAN-' + str(self.loadModel))
            print("DONE!")

        # if new model set tensorboard
        summaryOp = tf.summary.merge_all()
        summaryWriter = tf.summary.FileWriter(
            "train/gan_{}".format(datetime.datetime.now().strftime("%s")),
            sess.graph)

        # get length of dataset
        nData = len(X)
        # sample noise Z to generate image - normal distributed
        sampleZ = np.random.normal(0, 1, size=self.zDim)

        # start training
        self.step = 0
        self.losses = []

        # train bool
        trainEncD = True
        trainD = True

        # epoch loop
        startEpoch = int(self.loadModel) + 1
        for epoch in range(startEpoch, self.epochs):

            # print
            print("Starting epoch n {}/{} : ".format(epoch + 1, self.epochs))

            # compute the number of iterations for an epoch
            nIter = nData
            # randomize indexes for batches
            batchAllIndexes = np.random.randint(0, nData, size=nData)

            # iter loop
            for iter in range(nIter):

                # increment step counter
                self.step += 1
                print("Starting iteration n {}/{} : ".format(iter + 1, nIter))

                # get batch indexes
                batchIndexes = batchAllIndexes[iter]

                # get data and convert it to correct format
                thisX, thisY = h.processScene(X[batchIndexes], Y[batchIndexes],
                                              self.subVolumeShape)

                # apply some noise to the data
                noiseX = np.random.normal(loc=0,
                                          scale=0.001,
                                          size=self.subVolumeShape)
                noiseY = np.random.normal(loc=0,
                                          scale=0.001,
                                          size=self.subVolumeShape)
                thisX = np.add(thisX, noiseX)
                thisY = np.add(thisY, noiseY)

                # sample latent sample
                thisZ = np.random.normal(0, 1, size=self.zDim)

                # UPDATE E
                _ = sess.run(self.encTrainOpt,
                             feed_dict={
                                 self.x: thisX,
                                 self.y: thisY,
                                 self.z: thisZ
                             })

                # UPDATE G
                _, thisSummary = sess.run([self.genTrainOpt, summaryOp],
                                          feed_dict={
                                              self.x: thisX,
                                              self.y: thisY,
                                              self.z: thisZ
                                          })

                # UPDATE D
                if (trainD):
                    # sample random noise for D
                    _ = sess.run(self.discTrainOpt,
                                 feed_dict={
                                     self.x: thisX,
                                     self.y: thisY,
                                     self.z: thisZ
                                 })

                # UPDATE ENC D
                if (trainEncD):
                    _ = sess.run(self.encDiscTrainOpt,
                                 feed_dict={
                                     self.x: thisX,
                                     self.y: thisY,
                                     self.z: thisZ
                                 })

                # write summary
                summaryWriter.add_summary(thisSummary, self.step)

                # eval losses and at the end of each iter, get the losses and print them out
                encDiscTrainLoss = self.encDLoss.eval({
                    self.x: thisX,
                    self.y: thisY,
                    self.z: thisZ
                })
                discTrainLoss = self.dLoss.eval({
                    self.x: thisX,
                    self.y: thisY,
                    self.z: thisZ
                })
                encTrainLoss = self.encLoss.eval({
                    self.x: thisX,
                    self.y: thisY,
                    self.z: thisZ
                })
                genTrainLoss = self.gLoss.eval({
                    self.x: thisX,
                    self.y: thisY,
                    self.z: thisZ
                })
                encDAccuracy = self.encDiscAccuracy.eval({
                    self.x: thisX,
                    self.y: thisY,
                    self.z: thisZ
                })
                dAccuracy = self.discAccuracy.eval({
                    self.x: thisX,
                    self.y: thisY,
                    self.z: thisZ
                })
                print("Encoder Discriminator Accuracy : {:.2f}...".format(
                    encDAccuracy))
                print("Discriminator Accuracy : {:.2f}...".format(dAccuracy))

                # check training next iteration
                trainEncD = True
                trainD = True
                if (encDAccuracy >= 0.8):
                    trainEncD = False
                if (dAccuracy >= 0.8):
                    trainD = False

                # print losses
                print(
                    "Encoder Discriminator Loss: {:.4f}...".format(
                        encDiscTrainLoss),
                    "Discriminator Loss: {:.4f}...".format(discTrainLoss),
                    "Encoder Loss: {:.4f}".format(encTrainLoss),
                    "Generator Loss: {:.4f}".format(genTrainLoss))

            # get sample
            thisSample, thisSampleLogit = sess.run(self.generator(
                self.z, reuse=True, isTraining=False),
                                                   feed_dict={self.z: sampleZ})

            # visualize and save it
            sampleName = 'gen_' + str(epoch)
            h.save_data(thisSample[0], sampleName, './samples', label_names,
                        label_colors, label_map_path, False)

            # save session
            print("SAVING SESSION! . . . ")
            self.saver.save(sess, './checkpoints/VAE_GAN', global_step=epoch)

        # return the losses
        return self.losses
Example #12
def pv_data(coordinates=None,
            address=None,
            tilt=30,
            solar_size_kw_dc=4500,
            inverter_size_kw=4500,
            inverter_efficiency=96,
            system_losses_perc_of_dc_energy=14,
            mount="Fixed (open rack)",
            module_type="Default [Standard]"):
    """
    :param coordinates: optional, set the coordinates of the location as (latitude, longitude)
    :param address: optional, set the address as a string
    :param tilt: integer, tilt angle in degrees, default = 30
    :param solar_size_kw_dc: solar size in KW
    :param inverter_size_kw: inverter size in KW
    :param inverter_efficiency: inverter efficiency, default = 96 (96%)
    :param system_losses_perc_of_dc_energy: system losses, default = 14 (14%)
    :param mount: mount type, default = 'Fixed (open rack)'
    :param module_type: module type, default= "Default [Standard]"
    :return: hourly data of expected solar energy
    """
    # TODO:  Figure out why inverter_size_kw, export_limit_ac_k_w, dataset,
    #  and annual_solar_degradation_perc_per_year are not implemented
    if coordinates:
        assert isinstance(coordinates,
                          tuple), 'coordinates should be in tuple format'
        latitude = coordinates[0]
        longitude = coordinates[1]
        location = "&" + "lat=" + str(latitude) + "&" + "lon=" + str(longitude)
        tilt = latitude
        verbose_dic = {'Coordinates': coordinates, 'Tilt Angle': int(tilt)}
    elif address:
        location = "&" + "address=" + address
        verbose_dic = {'Address': address, 'Tilt Angle': int(tilt)}
    else:
        raise ValueError('either coordinates or address must be provided')
    verbose_dic.update({
        'Solar System Size (kW DC)':
        solar_size_kw_dc,
        'Inverter Size (kW)':
        inverter_size_kw,
        'Inverter Efficiency':
        inverter_efficiency,
        'System Losses Percentage of DC Energy':
        system_losses_perc_of_dc_energy
    })
    save_data(verbose_dic,
              'solar_inputs',
              overwrite=True,
              description='Solar Inputs to View on Excel')
    # Solar Input Selection Setup and API_Key Setup
    pv_watts_api_key = "VTF48OxZfq7tlP4oriEDVK1qAnpOCPdzl0XGT2c0"
    mount_dict = {
        "Fixed (open rack)": 0,
        "Fixed (roof mount)": 1,
        "1-Axis Tracking": 2,
        "1-Axis Backtracking": 3,
        "2-Axis": 4,
        "Default [Fixed (open rack)]": 0
    }
    module_type_dict = {
        "Standard": 0,
        "Premium": 1,
        "Thin film": 2,
        "Default [Standard]": 0
    }
    array_azimuth_dict = {"135 (SE)": 135, "180 (S)": 180, "225 (SW)": 225}
    dataset = "nsrdb"

    array_azimuth = array_azimuth_dict["180 (S)"]
    mount = mount_dict[mount]
    module_type = module_type_dict[module_type]

    # Over a 25 year life
    annual_solar_degradation_perc_per_year = 0.5 * .01
    # If no export limit then state "No Limit"
    export_limit_ac_k_w = 3000
    variable = 1

    get_link = ''.join([
        "https://developer.nrel.gov/api/pvwatts/v6.json?" + "api_key=" +
        pv_watts_api_key,
        "&" + "system_capacity=" + str(solar_size_kw_dc),
        "&" + "module_type=" + str(module_type),
        "&" + "losses=" + str(system_losses_perc_of_dc_energy),
        "&" + "array_type=" + str(mount),
        "&" + "tilt=" + str(tilt),
        "&" + "azimuth=" + str(array_azimuth),
        location,
        "&" + "dataset=nsrdb",
        "&" + "timeframe=hourly",
        "&" + "inv_eff=" + str(inverter_efficiency),
    ])
    result = requests.get(get_link)
    data = result.json()
    outs_w = data["outputs"]["ac"]
    outs_kw = [out / 1000 for out in outs_w]
    outs_poa = data["outputs"]["poa"]
    outs_dn = data["outputs"]["dn"]
    outs_tamb = data["outputs"]["tamb"]
    outs_poa = pd.Series(outs_poa)
    outs_kw = pd.Series(outs_kw)
    outs_dn = pd.Series(outs_dn)
    outs_tamb = pd.Series(outs_tamb)
    df = load_data('hours_in_year')
    df['solar_output_kw'] = outs_kw.values
    df['solar_irradiance'] = outs_poa.values
    df['direct_normal_irradiance'] = outs_dn.values
    df['ambient_temperature'] = outs_tamb.values * 1.8000 + 32

    return df
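
A hypothetical call, passing coordinates as (latitude, longitude) and otherwise relying on the defaults documented above; note that tilt is overridden to the latitude when coordinates are given:

df = pv_data(coordinates=(39.74, -105.18), solar_size_kw_dc=4500, inverter_size_kw=4500)
print(df[['solar_output_kw', 'solar_irradiance']].head())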
Example #13
    return l_data, l_count, l_dictionary, l_reverse_dictionary


data, count, dictionary, reverse_dictionary = build_dataset(arg_words=words)
# print('l_data:', data)
# print('l_count:', count)
# print('l_dictionary:', dictionary)
# print('l_reverse_dictionary:', reverse_dictionary)
# Delete words to save memory
del words

data_index = 0

# Save the dictionary and token lookup files for later use with the model
helper.save_data(token=token_dict,
                 vocab_to_int=dictionary,
                 int_to_vocab=reverse_dictionary)


# 3. Generate training batches for the skip-gram model
def generate_batch(arg_batch_size, arg_num_skips, arg_skip_windows):
    global data_index

    l_batch = np.ndarray(shape=arg_batch_size,
                         dtype=np.int32)  # (arg_batch_size,)
    l_labels = np.ndarray(shape=(arg_batch_size, 1),
                          dtype=np.int32)  # (arg_batch_size,1)
    span = 2 * arg_skip_windows + 1  # e.g. a window over [我 爱 祖国]
    buffer = collections.deque(maxlen=span)

    # Combine the words into training pairs as randomly as possible
Example #14
def suf_processing(user, mapping, data_to_save):
    index_name = mapping['identifier'] + "_" + user['username']
    user = {
        "password": user['password']
    }
    helper.save_data(user=user, datas=data_to_save, index_name=index_name)