Skip to content
Snippets Groups Projects
service.py 3.41 KiB
Newer Older
##
# This program has been developed by students from the bachelor Computer Science at Utrecht University within the Software Project course.
# © Copyright Utrecht University (Department of Information and Computing Sciences)
##

#!/usr/bin/env python
import os
import sys
import inspect
import networkx as nx
import json

# Importing in Python is rather complicated when attempting to adhere to clean code architecture
# These few lines set the import paths so Docker can actually build the images without having to resort to copy paste-ing
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from MLBasicFunctions import addNewEdgeMetaData, buildGraph
from MLRepositoryInterface import MLServerInterface

# We often compare against this specific string, so we clearly define it to prevent any typos
# Python has no constants so we simply give it a very obvious name that implies it is not supposed to be changed
ML_QUEUE_NAME = "lpr_queue"
SERVICE_NAME = "linkPrediction"

##
# MLServer implements the MLServerInterface interface
##
    # __init__ initialises the MLServer with the proper parameters
    #   self: Self@MLServer, the MLServer implementation
    #   Return: None, return nothing
    ##
    def __init__(self):
        """Initialise the MLServer for the link prediction service.

        Registers this server under SERVICE_NAME and binds it to the
        ML_QUEUE_NAME RabbitMQ queue via the MLServerInterface base class.
        """
        # The service name and queue name are module-level constants so the
        # algorithm's communication parameters live in one obvious place.
        super().__init__(SERVICE_NAME, ML_QUEUE_NAME)
    ##
    # decodeMessage builds a NetworkX graph based on the incoming query data
    #   self: Self@MLServer, the MLServer implementation
    #   incomingQueryData: Any, the incoming query data in JSON format
    #   Return: Graph, the NetworkX graph
    ##
    def decodeMessage(self, incomingQueryData):
        """Build a NetworkX graph from the incoming query data.

        Args:
            incomingQueryData: the decoded JSON payload of the query.

        Returns:
            Graph: the NetworkX graph produced by buildGraph.
        """
        # Bug fix: the graph was assigned to a local but never returned,
        # so callers (e.g. __call__) received None and the downstream
        # nx.jaccard_coefficient call would fail.
        return buildGraph(incomingQueryData)
    ##
    # adjustData adjusts the machine learning results into a format we can work with
    #   self: Self@MLServer, the MLServer implementation
    #   mlResult: Any, the result produced by the machine learning algorithm
    #   Return: list, a list of formatted edges
    ##
    def adjustData(self, data):
        """Format machine learning results into a list of edge dictionaries.

        Args:
            data: iterable of (from, to, value) triples, e.g. as yielded by
                nx.jaccard_coefficient.

        Returns:
            list: one formatted edge dict per triple whose value is > 0.
        """
        # Bug fix: the original built the list but never returned it, so the
        # caller always received None. Locals are also renamed to avoid
        # shadowing the `list` and `dict` builtins.
        edges = []
        # Enumerate over the full input so edge ids stay aligned with the
        # original positions, even when zero-valued triples are skipped.
        for index, (fr, to, value) in enumerate(data):
            if value > 0:
                edges.append({
                    "attributes": {"jaccard_coefficient": value},
                    "from": fr,
                    "id": "link_prediction_relation/" + str(index),
                    "to": to,
                })
        return edges
    ##
    # __call__ takes an incoming message and applies the machine learning algorithm and data transformations
    #   self: Self@MLServer, the MLServer implementation
    #   body: Any, the body of the incoming RabbitMQ message
    #   Return: str, a formatted JSON string of the query result after the application of a machine learning algorithm
    ##
    def __call__(self, body):
        """Process one incoming RabbitMQ message end-to-end.

        Decodes the message body into query data, builds the graph, runs
        the jaccard coefficient link prediction algorithm, and returns the
        adjusted result.

        Args:
            body: the raw bytes body of the incoming RabbitMQ message.

        Returns:
            the result of adjustData applied to the algorithm's output.
        """
        query = json.loads(body.decode())
        # Log the query id and type for traceability
        print(query["queryID"])
        print(query["type"])
        # Turn the query payload into a NetworkX graph
        graph = self.decodeMessage(query)
        # This is where the specific algorithm is actually applied
        predictions = nx.jaccard_coefficient(graph)
        return self.adjustData(predictions)