ml-trust-model

git clone git://git.codymlewis.com/ml-trust-model.git
Log | Files | Refs | README

commit c6bd717d27301b36745f6863db66e0a5c659fcbd
parent a035c45f6d5cc69836ee8a624d84f8482bfe6fca
Author: Cody Lewis <codymlewis@protonmail.com>
Date:   Fri, 19 Apr 2019 20:40:13 +1000

Added callbacks to ANN + some small refactoring

Diffstat:
M .gitignore | 1 +
M README.md | 5 +++++
M TrustManager/ANN.py | 37 +++++++++++++++++++++----------------
M TrustManager/__init__.py | 48 +++++++++++++++++++++++++++---------------------
M TrustModel.py | 6 ++++--
A logs/events.out.tfevents.1555664174.ostium-veritas | 0
6 files changed, 58 insertions(+), 39 deletions(-)

diff --git a/.gitignore b/.gitignore
@@ -396,3 +396,4 @@ Session.vim
 *.pdf
 *.gv
 *.h5
+logs/*
diff --git a/README.md b/README.md
@@ -8,6 +8,11 @@ pip3 install -r requirements.txt
 ```
 ## Running
+To run every using an ANN as the predictor:
 ```
 python3 TrustModel.py
 ```
+
+Running `python3 TrustModel.py -h` will give a help menu with the available options,
+when the ANN is training you may run `tensorboard --logdir ./logs` to get a live graph
+of the error curve and accuracy.
diff --git a/TrustManager/ANN.py b/TrustManager/ANN.py
@@ -12,28 +12,33 @@ Date: 2019-04-12
 '''
 
-def create_and_train_ann(train_data, train_labels, test_data, test_labels):
+def create_and_train_ann(train_data, train_labels, test_data, test_labels, model=None):
     '''
     Create a neural network and train it on the given data.
     '''
-    model = keras.models.Sequential()
-    model.add(keras.layers.Dense(128, input_shape=(3,)))
-    model.add(keras.layers.Activation('relu'))
-    model.add(keras.layers.Dense(128))
-    model.add(keras.layers.Activation('relu'))
-    model.add(keras.layers.Dense(128))
-    model.add(keras.layers.Activation('relu'))
-    model.add(keras.layers.Dense(64))
-    model.add(keras.layers.Activation('relu'))
-    model.add(keras.layers.Dropout(0.5))
-    model.add(keras.layers.Dense(3))
-    model.add(keras.layers.Activation('sigmoid'))
+    if not model:
+        model = keras.models.Sequential()
+        model.add(keras.layers.Dense(128, input_shape=(4,)))
+        model.add(keras.layers.Activation('relu'))
+        model.add(keras.layers.Dense(128))
+        model.add(keras.layers.Activation('relu'))
+        model.add(keras.layers.Dense(128))
+        model.add(keras.layers.Activation('relu'))
+        model.add(keras.layers.Dense(64))
+        model.add(keras.layers.Activation('relu'))
+        model.add(keras.layers.Dropout(0.5))
+        model.add(keras.layers.Dense(3))
+        model.add(keras.layers.Activation('sigmoid'))
 
-    adam = keras.optimizers.Adam(lr=0.0001)
-    model.compile(loss="mean_squared_error", optimizer=adam, metrics=['accuracy'])
+        adam = keras.optimizers.Adam(lr=0.0001)
+        model.compile(loss="mean_squared_error", optimizer=adam, metrics=['accuracy'])
 
     data = np.array(train_data + test_data)
     labels = skp.label_binarize(np.array(train_labels + test_labels), ["-1", "0", "1"])
-    model.fit(x=data, y=labels, epochs=4000, validation_split=0.5)
+    tensorboard = keras.callbacks.TensorBoard(log_dir="./logs", histogram_freq=0, write_graph=True, write_images=False)
+    checkpointer = keras.callbacks.ModelCheckpoint("data/ANN.h5", monitor='val_loss', verbose=1,
+                                                   save_best_only=False, save_weights_only=False,
+                                                   mode='auto', period=1)
+    model.fit(x=data, y=labels, epochs=500, validation_split=0.5, callbacks=[tensorboard, checkpointer])
 
     return model
diff --git a/TrustManager/__init__.py b/TrustManager/__init__.py
@@ -137,7 +137,7 @@ class TrustManager:
                 f"{reports_from_node_i[0]},{reports_on_node_j[0]},{reports_on_node_j[1].csv_output()}\n"
             )
 
-    def train(self):
+    def train(self, cont):
         '''
         Train the predictor.
         '''
@@ -145,7 +145,7 @@ class TrustManager:
             self.evolve_svm()
             self.load_svms()
         else:
-            self.train_ann()
+            self.train_ann(cont)
             self.load_ann()
 
     def evolve_svm(self):
@@ -173,18 +173,17 @@ class TrustManager:
         joblib.dump(svms, "data/SVMs.pkl")
         print()
 
-    def train_ann(self):
+    def train_ann(self, cont):
         '''
         Train the artificial neural network.
         '''
-        train_data, train_notes = read_data(self.__train_filename)
-        test_data, test_notes = read_data(self.__test_filename)
+        train_data, train_notes = read_data(self.__train_filename, dict_mode=False)
+        test_data, test_notes = read_data(self.__test_filename, dict_mode=False)
 
-        if not os.path.exists("data/ANN"):
-            os.makedirs("data/ANN")
-        for reporter_id, _ in train_data.items():
-            ANN.create_and_train_ann(train_data[reporter_id], train_notes[reporter_id],
-                                     test_data[reporter_id], test_notes[reporter_id]).save(f"data/ANN/{reporter_id}.h5")
+        if cont and os.path.exists("data/ANN.h5"):
+            self.load_ann()
+        ANN.create_and_train_ann(train_data, train_notes, test_data, test_notes,
+                                 model=self.__predictor).save("data/ANN.h5")
 
     def load_svms(self):
         '''
@@ -197,8 +196,7 @@ class TrustManager:
         Load the neural network classifier.
         '''
         self.__predictor = dict()
-        for node_id in range(len(self.__network)):
-            self.__predictor[node_id] = keras.models.load_model(f"data/ANN/{node_id}.h5")
+        self.__predictor = keras.models.load_model(f"data/ANN.h5")
 
     def get_all_recommendations(self, service_target, capability_target):
         '''
@@ -367,22 +365,30 @@ def load(train_filename, test_filename, use_svm):
     return trust_manager
 
-def read_data(filename, delimiter=","):
+def read_data(filename, delimiter=",", dict_mode=True):
     '''
     Read data from a csv of reports.
     '''
-    train_data = dict()
-    notes = dict()
+    if dict_mode:
+        train_data = dict()
+        notes = dict()
+    else:
+        train_data = []
+        notes = []
 
     with open(filename) as report_csv:
         csv_reader = csv.reader(report_csv, delimiter=delimiter)
         for row in csv_reader:
-            reporter_id = int(row[0])
-            if train_data.get(reporter_id):
-                train_data[reporter_id].append(row[1:-1])
-                notes[reporter_id].append(row[-1])
+            if dict_mode:
+                reporter_id = int(row[0])
+                if train_data.get(reporter_id):
+                    train_data[reporter_id].append(row[1:-1])
+                    notes[reporter_id].append(row[-1])
+                else:
+                    train_data[reporter_id] = [row[1:-1]]
+                    notes[reporter_id] = [row[-1]]
             else:
-                train_data[reporter_id] = [row[1:-1]]
-                notes[reporter_id] = [row[-1]]
+                train_data.append(row[:-1])
+                notes.append(row[-1])
 
     return train_data, notes
diff --git a/TrustModel.py b/TrustModel.py
@@ -27,6 +27,8 @@ if __name__ == '__main__':
                         help="Use an ann as the predictor [default predictor]")
     PARSER.add_argument("-t", "--train", dest="train", action="store_const", const=True, default=False,
                         help="Train the predictor on the previously generated data")
+    PARSER.add_argument("-co", "--continue", dest="cont", action="store_const", const=True, default=False,
+                        help="Continue training the ann.")
     PARSER.add_argument("-tr", "--transact", dest="transact", action="store", nargs=3, type=int,
                         metavar=("ID", "SERVICE", "CAPABILITY"),
                         help="Simulate a single transaction for node ID for SERVICE at CAPABILITY and print out the trusted list.")
@@ -53,7 +55,7 @@ if __name__ == '__main__':
             train_filename=TRAIN_FILENAME, test_filename=TEST_FILENAME, use_svm=ARGS.use_svm
         )
         TRUST_MANAGER.bootstrap(ARGS.epochs)
-        TRUST_MANAGER.train()
+        TRUST_MANAGER.train(ARGS.cont)
         TRUST_MANAGER.graph_recommendations(1, 1, 1)
         BAD_PER, OK_PER, GOOD_PER = TRUST_MANAGER.simulate_transactions(ARGS.epochs)
         print(f"Percentage of bad transactions: {BAD_PER}")
@@ -78,7 +80,7 @@ if __name__ == '__main__':
     if ARGS.train:
         print("Training...")
         TRUST_MANAGER = TrustManager.load(TRAIN_FILENAME, TEST_FILENAME, ARGS.use_svm)
-        TRUST_MANAGER.train()
+        TRUST_MANAGER.train(ARGS.cont)
 
     if ARGS.transact:
         TRUST_MANAGER = TrustManager.load(TRAIN_FILENAME, TEST_FILENAME, ARGS.use_svm)
diff --git a/logs/events.out.tfevents.1555664174.ostium-veritas b/logs/events.out.tfevents.1555664174.ostium-veritas
Binary files differ.