diff --git a/cypher/convertQueryNew.go b/cypher/convertQueryNew.go
new file mode 100644
index 0000000000000000000000000000000000000000..151a4b9fd2df04154c97fa24fe8743fd5b720173
--- /dev/null
+++ b/cypher/convertQueryNew.go
@@ -0,0 +1,199 @@
+package cypher
+
+import (
+	"fmt"
+
+	"git.science.uu.nl/graphpolaris/query-conversion/entity"
+)
+
+// ConvertQuery2 converts an incoming JSON query into Cypher.
+// Disconnected pill clusters are detected and converted separately; the
+// resulting fragments are concatenated into a single query string.
+//
+// Returns a pointer to the generated Cypher, or an error when the query
+// fails the basic health check.
+//
+// Fix: the original body ended without a return statement, which does not
+// compile for a (*string, error) result.
+func (s *Service) ConvertQuery2(totalJSONQuery *entity.IncomingQueryJSON) (*string, error) {
+
+	ok, err := performBasicHealthCheck(totalJSONQuery)
+	if !ok {
+		// Kind of a placeholder for better error handling
+		return nil, err
+	}
+
+	finalCypher := make([]string, 0)
+
+	// ** CHECK THAT THEY ARE NOT EQUAL IF RECURSED OTHERWISE FINAL RETURN WONT WORK
+	queryJSON := totalJSONQuery
+
+	// Flattened out recursion on the JSON: peel off one connected cluster
+	// per iteration until no unprocessed pills remain.
+	for {
+		query, rest, isRest := checkForQueryCluster(queryJSON)
+		ok, err := checkQueryValidity(query)
+		if ok {
+			finalCypher = append(finalCypher, *createCypher(query))
+		} else {
+			// Invalid sub-query: skip it but keep converting the rest.
+			// TODO: propagate this error instead of only logging it.
+			fmt.Println(err)
+		}
+
+		if !isRest {
+			break
+		}
+		queryJSON = rest
+	}
+
+	// NOTE(review): open question (translated from the original Dutch note):
+	// should pills above a group-by be part of the final return statement?
+	// If not, a hierarchy function must first decide what gets returned.
+	finalCypher = append(finalCypher, *createReturnStatement(totalJSONQuery))
+
+	// Concatenate the per-cluster fragments into one Cypher string.
+	result := ""
+	for i, fragment := range finalCypher {
+		if i > 0 {
+			result += "\n"
+		}
+		result += fragment
+	}
+	return &result, nil
+}
+
+// Very placeholdery
+// magicHierarchyFunction is a stub for the future hierarchy builder: it is
+// meant to order the query pills so dependencies resolve top-to-bottom.
+// It currently always returns nil; callers must tolerate that.
+func magicHierarchyFunction(JSONQuery *entity.IncomingQueryJSON) []pdictList {
+	return nil
+}
+
+// createCypher creates queries without the return statement, due to the possibility of multiple disconnected queries
+//
+// NOTE(review): still a stub. Fix: the original body declared a *string
+// result but never returned, and left queryHierarchy unused — neither
+// compiles. Until the hierarchy function is implemented, an empty query is
+// returned so callers can safely dereference the result.
+func createCypher(JSONQuery *entity.IncomingQueryJSON) *string {
+	// Intended structure (translated from the original Dutch notes):
+	//   MATCH (part 1)
+	//   constraints on the entities
+	//   UNWIND as r0
+	//   WITH *
+	//   constraints on r0
+	//   ... then repeat for the next part
+	_ = magicHierarchyFunction(JSONQuery)
+
+	result := ""
+	return &result
+}
+
+// NOTE: may need restructuring (translated from the original Dutch note)
+// createReturnStatement creates the final return statement, connecting all previous cypher together
+//
+// NOTE(review): still a stub. Fix: the original body had no return
+// statement, which does not compile for a *string result.
+func createReturnStatement(JSONQuery *entity.IncomingQueryJSON) *string {
+	// Open question (translated): should pills above a GROUP BY be part of
+	// the returned data? Probably not.
+	result := ""
+	return &result
+}
+
+// createFilterStatement builds the Cypher fragment for a single filter pill.
+// It assumes 'r0 = relationships(p0)' has already been declared by the
+// caller (translated from the original Dutch note).
+//
+// NOTE(review): still a stub. Fix: the original body was empty, which does
+// not compile for a *string result.
+func createFilterStatement(filter *entity.QueryFilterStruct) *string {
+	result := ""
+	return &result
+}
+
+// queryPart is one pill (relation or group-by) in the dependency ordering
+// built by createQueryHierarchy.
+type queryPart struct {
+	qType        string // Eg if it is a relation or groupby
+	qID          int    // ID of said relation/gb
+	partID       int    // Custom ID used for dependancy
+	dependancies []int  // List of partID's that need to come before (sic: "dependencies")
+}
+
+// query is the ordered collection of queryParts forming one hierarchy.
+type query []queryPart
+
+// find returns a pointer to the part with the given ID and type, or nil
+// when no such part exists.
+//
+// Fix: the original returned &part where part was the range-loop variable,
+// i.e. a pointer to a copy. Callers appending to part.dependancies through
+// that pointer silently mutated the copy instead of the stored element.
+// Indexing into the slice makes the returned pointer alias the real element.
+func (q query) find(qID int, qType string) *queryPart {
+	for i := range q {
+		if q[i].qID == qID && q[i].qType == qType {
+			return &q[i]
+		}
+	}
+	return nil
+}
+
+// createQueryHierarchy orders all relation and group-by pills by dependency.
+//
+// Idea (translated from the original Dutch notes): group relations with
+// their entities into ent-rel-ent triples. For A-rel-B-rel-C this becomes
+// A-rel-B and B-rel-C, where B-C must come after A-B, i.e. B-C depends on
+// A-B. The list can then be walked back-to-front so a dependent part is
+// always seen before the part it depends on.
+//
+// NOTE(review): the result is built in `parts` but not returned yet — this
+// function is a work in progress. Also note that the appends below go
+// through find()'s returned pointer; confirm find() returns a pointer into
+// the slice, otherwise these appends are lost.
+func createQueryHierarchy(JSONQuery *entity.IncomingQueryJSON) {
+
+	var parts query
+	IDctr := 0
+
+	// Add them all to query parts
+	for _, rel := range JSONQuery.Relations {
+		part := queryPart{
+			qType:        "relation",
+			qID:          rel.ID,
+			partID:       IDctr,
+			dependancies: make([]int, 0),
+		}
+		parts = append(parts, part)
+
+		IDctr++
+
+	}
+
+	for _, gb := range JSONQuery.GroupBys {
+		part := queryPart{
+			qType:        "groupBy",
+			qID:          gb.ID,
+			partID:       IDctr,
+			dependancies: make([]int, 0),
+		}
+		parts = append(parts, part)
+
+		IDctr++
+
+	}
+
+	// Check dependancies in a nice O(n^2)
+	for _, rel := range JSONQuery.Relations {
+		if rel.FromID == -1 {
+			continue
+		}
+
+		// Check the dependancies From - To
+		for _, rela := range JSONQuery.Relations {
+			if rela.ToID == -1 {
+				continue
+			}
+
+			if rel.FromID == rela.ToID && rel.FromType == "relation" {
+				part := parts.find(rel.ID, "relation")
+				part.dependancies = append(part.dependancies, parts.find(rela.ID, "relation").partID)
+			}
+		}
+
+		if rel.ToID == -1 {
+			continue
+		}
+
+		// Now for connections to group by's it doesnt matter if the GB is attached to the from or the to
+		// The GB always has priority
+		for _, gb := range JSONQuery.GroupBys {
+			if (rel.FromID == gb.ID && rel.FromType == "groupBy") || (rel.ToID == gb.ID && rel.ToType == "groupBy") {
+				part := parts.find(rel.ID, "relation")
+				part.dependancies = append(part.dependancies, parts.find(gb.ID, "groupBy").partID)
+			}
+		}
+	}
+
+	// Same trick for group by's
+	for _, gb := range JSONQuery.GroupBys {
+		for _, rela := range JSONQuery.Relations {
+			// Check if the gb is connected to the relation
+			if (gb.ByID == rela.ID && gb.ByType == "relation") || // Is the By connected to a relation
+				(gb.GroupID == rela.ID && gb.GroupType == "relation") || // is the Group connected to a relation
+				((gb.ByID == rela.FromID || gb.ByID == rela.ToID) && gb.ByType == "entity") || // Is the by connected to an entity connected to the relation
+				((gb.GroupID == rela.FromID || gb.GroupID == rela.ToID) && gb.GroupType == "entity") { // Is the Group connected to an entity connected to the relation
+				part := parts.find(gb.ID, "groupBy")
+				part.dependancies = append(part.dependancies, parts.find(rela.ID, "relation").partID)
+			}
+		}
+
+	}
+
+	// ** MODIFIERS?
+
+	// Remaining plan (translated from the original Dutch notes):
+	// - Turn every relation and group-by into a query part.
+	// - For each relation, check whether its FROM is another relation's TO --> dependency.
+	// - If the from or the to is a group-by, it is also directly dependent.
+	// - If a GB is attached to relation A, all other relations attached to A come first too.
+
+	// ** Open: the case where an entity is attached to a group-by via an IN?
+
+	// Intended result: a list of query parts with dependencies pointing downward,
+	// so the list can be walked top-to-bottom to build the query.
+
+	// Filters are applied later, while building the query per relation/group-by,
+	// since they can presumably only attach in one way.
+
+	// ** TODO: check how a filter placed on a relation alone works, since then
+	// it does not live on a connection.
+}
diff --git a/cypher/healthChecks.go b/cypher/healthChecks.go
new file mode 100644
index 0000000000000000000000000000000000000000..14a7b3a0186f97bca3523fd9f9d90f28fbc0c1cc
--- /dev/null
+++ b/cypher/healthChecks.go
@@ -0,0 +1,258 @@
+package cypher
+
+import (
+	"errors"
+	"fmt"
+
+	"git.science.uu.nl/graphpolaris/query-conversion/entity"
+)
+
+// performBasicHealthCheck validates that every index referenced by the
+// query exists: returned entities/relations must be inside the range of
+// their respective lists, and relation endpoints must point at existing
+// entities. Returns false plus a descriptive error on the first violation.
+func performBasicHealthCheck(JSONQuery *entity.IncomingQueryJSON) (bool, error) {
+
+	// Highest valid entity index
+	numEntities := len(JSONQuery.Entities) - 1
+	// Highest valid relation index
+	numRelations := len(JSONQuery.Relations) - 1
+
+	// Make sure no entity should be returned that is outside the range of that list
+	for _, e := range JSONQuery.Return.Entities {
+		// If this entity references an entity that is outside the range
+		if e > numEntities || e < 0 {
+			return false, errors.New("non-existing entity referenced in return")
+		}
+	}
+
+	// Make sure that no relation mentions a non-existing entity.
+	// Negative IDs are not rejected here: -1 marks an unconnected endpoint.
+	for _, r := range JSONQuery.Relations {
+		if r.FromID > numEntities || r.ToID > numEntities {
+			// Fix: corrected message typo ("non-exisiting")
+			return false, errors.New("non-existing entity referenced in relation")
+		}
+	}
+
+	// Make sure no non-existing relation is tried to be returned
+	for _, r := range JSONQuery.Return.Relations {
+		if r > numRelations || r < 0 {
+			return false, errors.New("non-existing relation referenced in return")
+		}
+	}
+
+	return true, nil
+}
+
+// pillKey builds the set-key for a pill: the first letter of its type name
+// followed by its ID, e.g. entity 0 -> "e0", relation 2 -> "r2".
+//
+// Fix: the original used %v on typeName[0], which formats the byte as a
+// number ("1010" instead of "e0"), so keys built from To/From types never
+// matched the literal-prefix keys built elsewhere. %c prints the character.
+func pillKey(typeName string, id int) string {
+	return fmt.Sprintf("%c%v", typeName[0], id)
+}
+
+// checkForQueryCluster detects whether the query panel contains multiple
+// disconnected queries. It returns the first connected cluster, the
+// remainder of the query, and a bool telling whether a remainder exists.
+// Connecting pills are relations and group-bys; filters live on top of a
+// connection and never act as a link themselves (translated note).
+func checkForQueryCluster(JSONQuery *entity.IncomingQueryJSON) (*entity.IncomingQueryJSON, *entity.IncomingQueryJSON, bool) {
+
+	cluster := make(map[string]bool) // set of pill keys: e0, r0, g0, ...
+
+	// Seed the cluster with the first relation, else the first group-by.
+	// Lots of existence checks because of half-finished queries.
+	if len(JSONQuery.Relations) > 0 {
+		cluster[fmt.Sprintf("r%v", JSONQuery.Relations[0].ID)] = true
+
+		if JSONQuery.Relations[0].ToID != -1 {
+			cluster[pillKey(JSONQuery.Relations[0].ToType, JSONQuery.Relations[0].ToID)] = true
+		}
+
+		if JSONQuery.Relations[0].FromID != -1 {
+			cluster[pillKey(JSONQuery.Relations[0].FromType, JSONQuery.Relations[0].FromID)] = true
+		}
+
+	} else if len(JSONQuery.GroupBys) > 0 {
+		cluster[fmt.Sprintf("g%v", JSONQuery.GroupBys[0].ID)] = true
+
+		// TODO: this still crashes when the group-by is not fully connected
+		cluster[pillKey(JSONQuery.GroupBys[0].GroupType, JSONQuery.GroupBys[0].GroupID)] = true
+		cluster[pillKey(JSONQuery.GroupBys[0].ByType, JSONQuery.GroupBys[0].ByID)] = true
+
+	} else if len(JSONQuery.Modifiers) > 0 {
+		// TODO: a single entity with constraints could be counted/averaged
+	}
+
+	// Grow the cluster to a fixed point: sweep all relations and group-bys,
+	// pulling in anything connected, until a sweep adds nothing new.
+	for {
+		stop := true
+
+		for _, rel := range JSONQuery.Relations {
+			rela := fmt.Sprintf("r%v", rel.ID)
+			if cluster[rela] {
+				// Already in the cluster, nothing to do
+				continue
+			}
+
+			// The relation joins the cluster when one of its endpoints is a
+			// member. Fix: the original unconditionally set cluster[from] =
+			// true before testing membership, wrongly gluing every From
+			// endpoint (and thus almost every relation) into the cluster.
+			partOfCluster := false
+			if rel.ToID != -1 && cluster[pillKey(rel.ToType, rel.ToID)] {
+				partOfCluster = true
+			}
+			if rel.FromID != -1 && cluster[pillKey(rel.FromType, rel.FromID)] {
+				partOfCluster = true
+			}
+
+			if partOfCluster {
+				// Fix: the relation's own key was never added, so it was
+				// re-processed on every sweep and the loop never terminated.
+				cluster[rela] = true
+				if rel.ToID != -1 {
+					cluster[pillKey(rel.ToType, rel.ToID)] = true
+				}
+				if rel.FromID != -1 {
+					cluster[pillKey(rel.FromType, rel.FromID)] = true
+				}
+				stop = false
+			}
+		}
+
+		// Now the same for group-bys
+		for _, gb := range JSONQuery.GroupBys {
+			gby := fmt.Sprintf("g%v", gb.ID)
+			if cluster[gby] {
+				continue
+			}
+
+			// A group-by must have all connections filled (unlike a
+			// relation), so Group and By can be read unconditionally here.
+			group := pillKey(gb.GroupType, gb.GroupID)
+			by := pillKey(gb.ByType, gb.ByID)
+
+			if cluster[group] || cluster[by] {
+				cluster[gby] = true
+				cluster[group] = true
+				cluster[by] = true
+				stop = false
+			}
+		}
+
+		// ** modifiers are left out for now; they are about to change anyway
+
+		if stop {
+			// No new pills were added to the cluster, thus it is finished
+			break
+		}
+	}
+
+	// Walk through the JSON and divide it into the cluster and the rest
+	restJSON := entity.IncomingQueryJSON{DatabaseName: JSONQuery.DatabaseName, Limit: JSONQuery.Limit}
+	clusterJSON := entity.IncomingQueryJSON{DatabaseName: JSONQuery.DatabaseName, Limit: JSONQuery.Limit}
+	isRest := false
+
+	// Loop through entities
+	for _, ent := range JSONQuery.Entities {
+		if cluster[fmt.Sprintf("e%v", ent.ID)] {
+			clusterJSON.Entities = append(clusterJSON.Entities, ent)
+			clusterJSON.Return.Entities = append(clusterJSON.Return.Entities, ent.ID)
+		} else {
+			restJSON.Entities = append(restJSON.Entities, ent)
+			restJSON.Return.Entities = append(restJSON.Return.Entities, ent.ID)
+			isRest = true
+		}
+	}
+
+	// Loop through relations
+	for _, rel := range JSONQuery.Relations {
+		if cluster[fmt.Sprintf("r%v", rel.ID)] {
+			clusterJSON.Relations = append(clusterJSON.Relations, rel)
+			clusterJSON.Return.Relations = append(clusterJSON.Return.Relations, rel.ID)
+		} else {
+			restJSON.Relations = append(restJSON.Relations, rel)
+			restJSON.Return.Relations = append(restJSON.Return.Relations, rel.ID)
+			isRest = true
+		}
+	}
+
+	// Loop through groupby's
+	for _, gb := range JSONQuery.GroupBys {
+		if cluster[fmt.Sprintf("g%v", gb.ID)] {
+			clusterJSON.GroupBys = append(clusterJSON.GroupBys, gb)
+			clusterJSON.Return.GroupBys = append(clusterJSON.Return.GroupBys, gb.ID)
+		} else {
+			restJSON.GroupBys = append(restJSON.GroupBys, gb)
+			restJSON.Return.GroupBys = append(restJSON.Return.GroupBys, gb.ID)
+			isRest = true
+		}
+	}
+
+	// ** Loop through modifiers (still to do)
+
+	// Filters were not part of the clustering: they live on top of a
+	// connection, so they never extend the cluster. Consequently, if a
+	// filter's From endpoint is in the cluster, the filter is as well.
+	for _, filter := range JSONQuery.Filters {
+		if cluster[pillKey(filter.FromType, filter.FromID)] {
+			clusterJSON.Filters = append(clusterJSON.Filters, filter)
+		} else {
+			restJSON.Filters = append(restJSON.Filters, filter)
+			isRest = true
+		}
+	}
+
+	return &clusterJSON, &restJSON, isRest
+}
+
+// ** POSSIBLY OBSOLETE, depends on the hierarchy function
+/* checkQueryValidity performs checks to see if the query is valid.
+
+Returns a boolean indicating if the query is valid; the error will contain a custom message saying what is wrong with the query.
+It is obviously possible the query is still invalid, but that is for the database to find out.
+*/
+func checkQueryValidity(JSONQuery *entity.IncomingQueryJSON) (bool, error) {
+
+	// At least 2 return values are required, since only one return is not
+	// possible (we do not allow relation-only queries)
+	ret := JSONQuery.Return
+	numOfReturns := len(ret.Entities) + len(ret.GroupBys) + len(ret.Relations)
+	if numOfReturns < 2 {
+		// Fix: Go error strings are lowercase by convention
+		return false, errors.New("insufficient return values")
+	}
+
+	return true, nil
+}
diff --git a/cypher/hierarchy.go b/cypher/hierarchy.go
new file mode 100644
index 0000000000000000000000000000000000000000..b21b4d3e16b5b4c07d9da0f3e0d239a9a3a32092
--- /dev/null
+++ b/cypher/hierarchy.go
@@ -0,0 +1,504 @@
+// DIT IS LEGACY CODE VAN SWP DIE IK STRAKS OOK NODIG HEB, MAAR IN ZN HUIDIGE STAAT GEBRUIK IK HET NIET
+// ENKEL TER REFERENTIE
+// Want global scope vars, YIKES
+
+package cypher
+
+import (
+	"fmt"
+	"strconv"
+
+	"git.science.uu.nl/graphpolaris/query-conversion/entity"
+)
+
+// pdict identifies a single pill in the legacy hierarchy: the kind of pill
+// ("entity", "relation", "groupBy", "filter", "dummy") plus an index into
+// the corresponding list of the incoming JSON.
+type pdict struct {
+	typename string
+	pointer  int
+}
+
+// pdictList is one layer of the hierarchy; it implements sort.Interface.
+type pdictList []pdict
+
+// Len reports the number of pills in the layer (sort.Interface).
+func (p pdictList) Len() int {
+	return len(p)
+}
+
+// Less orders pills lexicographically by type name, breaking ties on the
+// pointer value (sort.Interface).
+func (p pdictList) Less(i, j int) bool {
+	if p[i].typename != p[j].typename {
+		return p[i].typename < p[j].typename
+	}
+	return p[i].pointer < p[j].pointer
+}
+
+// Swap exchanges two pills in the layer (sort.Interface).
+func (p pdictList) Swap(i, j int) {
+	p[i], p[j] = p[j], p[i]
+}
+
+// Package-level working state for the legacy hierarchy builder.
+// NOTE(review): global mutable state makes this non-reentrant; search()
+// resets all of it on every call.
+var listoflists []pdictList  // the hierarchy: one pdictList per layer
+var reldone map[int]bool     // relations already placed in the hierarchy
+var entdone map[int]bool     // entities already placed
+var funcdone map[int]bool    // group-bys already placed
+var relfuncdone map[int]bool // relations whose group-bys were processed
+var filterDone map[int]bool  // filters already placed
+
+// search (re)builds the global hierarchy for the given query, starting the
+// walk at the entity with the given index. It resets all global bookkeeping,
+// seeds the first layer with that entity, recurses via EntToRel, appends the
+// filters and prints the resulting layers for debugging.
+func search(JSONQuery *entity.IncomingQueryJSON, index int) {
+	// Reset all package-level state from any previous run
+	listoflists = []pdictList{}
+	reldone = make(map[int]bool)
+	entdone = make(map[int]bool)
+	funcdone = make(map[int]bool)
+	relfuncdone = make(map[int]bool)
+	filterDone = make(map[int]bool)
+	var s pdictList
+	//printSlice(s)
+	//layercounter = 0
+
+	// Seed layer 0 with the starting entity
+	initent := pdict{
+		typename: "entity",
+		pointer:  index,
+	}
+
+	s = append(s, initent)
+	listoflists = append(listoflists, s)
+	EntToRel(JSONQuery, initent)
+
+	// Debug output: print every pill per layer
+	for i := range listoflists {
+		for j := range listoflists[i] {
+			fmt.Println(listoflists[i][j])
+		}
+		fmt.Println("")
+	}
+	AddFilters(JSONQuery)
+	fmt.Println(listoflists)
+
+}
+
+/*
+RelToEnt Get the entities connected to a relation and recursively constructs part of the hierarchy
+Entities always get added IN FRONT OF their respective relation in the hierarchy
+	JSONQuery: *entity.IncomingQueryJSON, the query in JSON format
+	rel: pdict, the relation to find all connected entities for
+
+NOTE(review): the loop compares FromID/ToID against the slice index i,
+i.e. it assumes entity IDs equal their positions in JSONQuery.Entities —
+confirm that invariant holds for all callers.
+*/
+func RelToEnt(JSONQuery *entity.IncomingQueryJSON, rel pdict) {
+	var newlist pdictList
+	layercounter := FindCurrentLayer(listoflists, rel)
+	// Loop over all entities
+	// If an entity is already in the entdone dict we already added it to the hierarchy, so we don't have to add it again
+	// If an entity matches either the from or to in a relation we can add it to the newlist
+	for i := range JSONQuery.Entities {
+		if _, ok := entdone[i]; !ok {
+			if JSONQuery.Relations[rel.pointer].FromID == i && JSONQuery.Relations[rel.pointer].FromType == "entity" {
+				fromentity := pdict{
+					typename: "entity",
+					pointer:  i,
+				}
+				newlist = append(newlist, fromentity)
+			} else if JSONQuery.Relations[rel.pointer].ToID == i && JSONQuery.Relations[rel.pointer].ToType == "entity" {
+				toentity := pdict{
+					typename: "entity",
+					pointer:  i,
+				}
+				newlist = append(newlist, toentity)
+			}
+		}
+	}
+	// This relation has found all its entities so we can set it's ID to true
+	reldone[rel.pointer] = true
+	// If the newlist is empty, we can just skip the recursion
+	// This is effectively our base case
+	if len(newlist) != 0 {
+		// If our layercounter is equal to 0 we are in the first "layer" of the hierarchy
+		// Because we add the entities IN FRONT OF their respective relation we don't have to move the layercounter before prepending
+		// If our layercounter is not equal to 0 we lower the layercounter and then add each item to the newly selected layer
+		if layercounter == 0 {
+			listoflists = prepend(listoflists, newlist)
+			fmt.Println("RelToEnt Layercounter 0 prepend entity")
+
+		} else {
+			layercounter--
+			for i := range newlist {
+				listoflists[layercounter] = append(listoflists[layercounter], newlist[i])
+
+				fmt.Println("RelToEnt Layercounter " + strconv.Itoa(layercounter) + " append to layer above us, appending type: " + newlist[i].typename + " with pointer: " + strconv.Itoa(newlist[i].pointer))
+			}
+
+		}
+
+		// After getting a list of entities we can only go towards a list of relation
+		// So we recurse by calling EntToRel
+		for i := range newlist {
+			fmt.Println("EntToRel being called with index?: " + strconv.Itoa(newlist[i].pointer))
+			EntToRel(JSONQuery, newlist[i])
+
+		}
+	}
+
+}
+
+/*
+EntToRel Get the relations connected to a entity and recursively constructs part of the hierarchy
+Relation always get added BEHIND their respective entity in the hierarchy
+	JSONQuery: *entity.IncomingQueryJSON, the query in JSON format
+	ent: pdict, the entity to find all connected relations for
+
+NOTE(review): like RelToEnt, this compares From/To IDs against the slice
+index i and therefore assumes relation order equals relation ID — confirm.
+*/
+func EntToRel(JSONQuery *entity.IncomingQueryJSON, ent pdict) {
+	var newlist pdictList
+	layercounter := FindCurrentLayer(listoflists, ent)
+	// Loop over all relations
+	// If a relation is already in the reldone dict we already added it to the hierarchy, so we don't have to add it again
+	// If a relation matches either the from or to with the entity we can add it to the newlist
+	for i := range JSONQuery.Relations {
+		if _, ok := reldone[i]; !ok {
+			if JSONQuery.Relations[i].FromID == ent.pointer && JSONQuery.Relations[i].FromType == "entity" {
+				rel := pdict{
+					typename: "relation",
+					pointer:  i,
+				}
+				newlist = append(newlist, rel)
+			} else if JSONQuery.Relations[i].ToID == ent.pointer && JSONQuery.Relations[i].ToType == "entity" {
+				rel := pdict{
+					typename: "relation",
+					pointer:  i,
+				}
+				newlist = append(newlist, rel)
+			}
+		}
+	}
+	// This entity has found all its relations so we can set it's ID to true
+	entdone[ent.pointer] = true
+
+	if len(newlist) != 0 {
+		// If our layercounter is equal to the length of the hierarchy - 1 we are in the last "layer" of the hierarchy
+		// Because we add the relations BEHIND their respective entities we don't have to move the layercounter before appending
+		// TODO TAKE OUT UNNEEDED LAYERCOUNTER INCREMENTS AND DECREMENTS
+		// If our layercounter is any other value we increase the layercounter and then add each item to the newly selected layer
+		if layercounter == len(listoflists)-1 {
+			listoflists = append(listoflists, newlist)
+			layercounter++
+			fmt.Println("EntToRel Layercounter last appending below: type relation")
+		} else {
+			layercounter++
+			for i := range newlist {
+				listoflists[layercounter] = append(listoflists[layercounter], newlist[i])
+				fmt.Println("EntToRel Layercounter " + strconv.Itoa(layercounter) + " append to layer below us, appending type: " + newlist[i].typename + " with pointer: " + strconv.Itoa(newlist[i].pointer))
+			}
+
+		}
+
+		// After getting a list of relations we can only go towards a list of entities or a list of functions
+		// So we recurse by calling RelToEnt and RelToAllFunc
+		for i := range newlist {
+			fmt.Println("RelToEnt being called with index?: " + strconv.Itoa(newlist[i].pointer))
+			RelToEnt(JSONQuery, newlist[i])
+			fmt.Println("RelToAllFunc being called with index?: " + strconv.Itoa(newlist[i].pointer))
+			RelToAllFunc(JSONQuery, newlist[i])
+
+		}
+	}
+}
+
+/*
+RelToAllFunc Get the functions connected (both functions that are applied to a subquery a relation is a part of and functions the relation is connected to itself)
+ to a relation and recursively constructs part of the hierarchy
+If a function is applied to a subquery the relation is a part of, we add it BEHIND its respective relation
+If a function is connected to a relation (relation uses the results from the function), we add it IN FRONT OF its respective relation
+	JSONQuery: *entity.IncomingQueryJSON, the query in JSON format
+	rel: pdict, the relation to find all connected functions for
+*/
+func RelToAllFunc(JSONQuery *entity.IncomingQueryJSON, rel pdict) {
+	var funcappliedtosubquery pdictList
+	var functowhichrelapplies pdictList
+	layercounter := FindCurrentLayer(listoflists, rel)
+	// Loop over all functions
+	// If a relation is already in the relfuncdone dict we already added it to the hierarchy, so we don't have to add it again
+	// If a function's relationID matches the current relation then the function is applied to a subquery
+	// If the relation's functionpointer matches a function's ID then the relation is connected to the function
+	// Depending on the case they get put in a different list and are put in different places in the hierarchy
+	for i := range JSONQuery.GroupBys {
+		if _, ok := relfuncdone[rel.pointer]; !ok {
+			if _, ok := funcdone[i]; !ok {
+				if JSONQuery.GroupBys[i].RelationID == rel.pointer {
+					relfunc := pdict{
+						typename: "groupBy",
+						pointer:  i,
+					}
+					funcappliedtosubquery = append(funcappliedtosubquery, relfunc)
+					fmt.Println("I AM HERE 1")
+				}
+
+				if JSONQuery.Relations[rel.pointer].FromID == i && JSONQuery.Relations[rel.pointer].FromType == "groupBy" {
+					fromfunc := pdict{
+						typename: "groupBy",
+						pointer:  i,
+					}
+					functowhichrelapplies = append(functowhichrelapplies, fromfunc)
+					fmt.Println("I AM HERE 2")
+
+				} else if JSONQuery.Relations[rel.pointer].ToID == i && JSONQuery.Relations[rel.pointer].ToType == "groupBy" {
+					tofunc := pdict{
+						typename: "groupBy",
+						pointer:  i,
+					}
+					functowhichrelapplies = append(functowhichrelapplies, tofunc)
+					fmt.Println("I AM HERE 3")
+
+				}
+
+			}
+		}
+
+	}
+	relfuncdone[rel.pointer] = true
+	// Two independent cursors so the two insertions below don't affect each other
+	layercountertwo := layercounter
+	layercounterthree := layercounter
+	// See main function comment to see which sublist gets put where in the hierarchy
+	if len(functowhichrelapplies) != 0 {
+
+		if layercountertwo == 0 {
+			listoflists = prepend(listoflists, functowhichrelapplies)
+			fmt.Println("RellToAllFunc Layercounter 0 prepend, prepending functowhichrelapplies")
+
+		} else {
+			layercountertwo--
+			for i := range functowhichrelapplies {
+				listoflists[layercountertwo] = append(listoflists[layercountertwo], functowhichrelapplies[i])
+				fmt.Println("RellToAllFunc Layercounter " + strconv.Itoa(layercountertwo) + " append to layer below us, appending type: " + functowhichrelapplies[i].typename + " with pointer: " + strconv.Itoa(functowhichrelapplies[i].pointer))
+			}
+
+		}
+
+		for i := range functowhichrelapplies {
+			fmt.Println("FuncToAllRell being called with index?: " + strconv.Itoa(functowhichrelapplies[i].pointer))
+			FuncToAllRel(JSONQuery, functowhichrelapplies[i])
+
+		}
+	}
+
+	if len(funcappliedtosubquery) != 0 {
+		//newlayercounter := layercounter
+		if layercounterthree == len(listoflists)-1 {
+			listoflists = append(listoflists, funcappliedtosubquery)
+			layercounterthree++
+			fmt.Println("RellToAllFunc Layercounter last prepend, appending funcappliedtosubquery")
+		} else {
+			layercounterthree++
+			for i := range funcappliedtosubquery {
+				listoflists[layercounterthree] = append(listoflists[layercounterthree], funcappliedtosubquery[i])
+				fmt.Println("RellToAllFunc Layercounter " + strconv.Itoa(layercounterthree) + " append to layer below us, appending type: " + funcappliedtosubquery[i].typename + " with pointer: " + strconv.Itoa(funcappliedtosubquery[i].pointer))
+
+			}
+
+		}
+
+		for i := range funcappliedtosubquery {
+			fmt.Println("FuncToAllRel being called with index?: " + strconv.Itoa(funcappliedtosubquery[i].pointer))
+			FuncToAllRel(JSONQuery, funcappliedtosubquery[i])
+		}
+	}
+
+}
+
+/*
+FuncToAllRel Get the relations connected (both relations that are in a subquery a function is applied to and relations that are connected to the function itself)
+ to a function and recursively constructs part of the hierarchy
+If a relation is in a subquery that the function is applied to, we add the relation IN FRONT OF its respective function
+If a relation is connected to a function, we add the relation BEHIND its respective function
+	JSONQuery: *entity.IncomingQueryJSON, the query in JSON format
+	function: pdict, the function to find all connected relations for
+*/
+func FuncToAllRel(JSONQuery *entity.IncomingQueryJSON, function pdict) {
+	var funcappliedtosubquery pdictList
+	var relattachedtofunc pdictList
+	layercounter := FindCurrentLayer(listoflists, function)
+	// Partition all not-yet-done relations into the two placement lists
+	for i := range JSONQuery.Relations {
+		if _, ok := funcdone[function.pointer]; !ok {
+			if _, ok := relfuncdone[i]; !ok {
+				// The func is attached to this relation
+				if JSONQuery.GroupBys[function.pointer].RelationID == i {
+					funcrel := pdict{
+						typename: "relation",
+						pointer:  i,
+					}
+					funcappliedtosubquery = append(funcappliedtosubquery, funcrel)
+
+				}
+
+				if JSONQuery.Relations[i].FromID == function.pointer && JSONQuery.Relations[i].FromType == "groupBy" {
+					fromrel := pdict{
+						typename: "relation",
+						pointer:  i,
+					}
+					relattachedtofunc = append(relattachedtofunc, fromrel)
+
+				} else if JSONQuery.Relations[i].ToID == function.pointer && JSONQuery.Relations[i].ToType == "groupBy" {
+					torel := pdict{
+						typename: "relation",
+						pointer:  i,
+					}
+					relattachedtofunc = append(relattachedtofunc, torel)
+
+				}
+
+			}
+		}
+	}
+	funcdone[function.pointer] = true
+
+	// Two independent cursors so the two insertions below don't affect each other
+	layercountertwo := layercounter
+	layercounterthree := layercounter
+	if len(funcappliedtosubquery) != 0 {
+		//newlayercounter := layercounter
+		if layercountertwo == 0 {
+			listoflists = prepend(listoflists, funcappliedtosubquery)
+			fmt.Println("FuncToAllRel Layercounter 0 prepend, prepending funcappliedtosubquery")
+
+		} else {
+			layercountertwo--
+			for i := range funcappliedtosubquery {
+				listoflists[layercountertwo] = append(listoflists[layercountertwo], funcappliedtosubquery[i])
+
+				fmt.Println("FuncToAllRel Layercounter " + strconv.Itoa(layercountertwo) + " append to layer below us, appending type: " + funcappliedtosubquery[i].typename + " with pointer: " + strconv.Itoa(funcappliedtosubquery[i].pointer))
+			}
+
+		}
+
+		for i := range funcappliedtosubquery {
+			fmt.Println("RelToEnt being called with index?: " + strconv.Itoa(funcappliedtosubquery[i].pointer))
+			RelToEnt(JSONQuery, funcappliedtosubquery[i])
+			fmt.Println("RelToAllFunc being called with index?: " + strconv.Itoa(funcappliedtosubquery[i].pointer))
+			RelToAllFunc(JSONQuery, funcappliedtosubquery[i])
+
+		}
+	}
+
+	if len(relattachedtofunc) != 0 {
+
+		if layercounterthree == len(listoflists)-1 {
+			listoflists = append(listoflists, relattachedtofunc)
+			layercounterthree++
+			fmt.Println("FuncToAllRel Layercounter last append, appending relattachedtofunc")
+
+		} else {
+			layercounterthree++
+			for i := range relattachedtofunc {
+				listoflists[layercounterthree] = append(listoflists[layercounterthree], relattachedtofunc[i])
+				fmt.Println("FuncToAllRel Layercounter " + strconv.Itoa(layercounterthree) + " append to layer below us, appending type: " + relattachedtofunc[i].typename + " with pointer: " + strconv.Itoa(relattachedtofunc[i].pointer))
+			}
+
+		}
+
+		for i := range relattachedtofunc {
+			fmt.Println("RelToEnt being called with index?: " + strconv.Itoa(relattachedtofunc[i].pointer))
+			RelToEnt(JSONQuery, relattachedtofunc[i])
+			fmt.Println("RelToAllFunc being called with index?: " + strconv.Itoa(relattachedtofunc[i].pointer))
+			RelToAllFunc(JSONQuery, relattachedtofunc[i])
+		}
+	}
+}
+
+// AddFilters inserts every filter pill into the hierarchy, each one placed
+// relative to the pill its From side is attached to.
+//
+// NOTE(review): the done-check uses the loop index i while addOneFilter
+// marks filterDone by filter.ID — these only agree when filter IDs equal
+// their positions in JSONQuery.Filters; confirm that invariant.
+func AddFilters(JSONQuery *entity.IncomingQueryJSON) {
+
+	for i, filter := range JSONQuery.Filters {
+		if _, ok := filterDone[i]; !ok {
+			// p = the pill the filter hangs off of, f = the filter itself
+			p := pdict{
+				typename: filter.FromType,
+				pointer:  filter.FromID,
+			}
+			f := pdict{
+				typename: "filter",
+				pointer:  filter.ID,
+			}
+			addOneFilter(f, JSONQuery, p, &filterDone)
+		}
+	}
+}
+
+// addOneFilter places a single filter pill directly below the layer of the
+// pill p it is attached to. When p is itself a filter that has not been
+// placed yet, it first recurses to place that parent filter.
+//
+// NOTE(review): in the first branch the fresh layer k is created empty
+// (pdictList{}) while the other branches use pdictList{filterPDict} — that
+// looks like it drops the filter from the new layer; verify intended.
+func addOneFilter(filterPDict pdict, JSONQuery *entity.IncomingQueryJSON, p pdict, filterDone *map[int]bool) {
+	if p.typename == "filter" && (*filterDone)[p.pointer] {
+		// Parent filter already placed: hang this filter below it
+		l := FindCurrentLayer(listoflists, p)
+		k := pdictList{}
+		if len(listoflists) > l+1 && listoflists[l+1][0].typename == "filter" {
+			listoflists[l+1] = append(listoflists[l+1], filterPDict)
+		} else {
+			listoflists = BelowAppend(listoflists, l, k)
+		}
+		(*filterDone)[filterPDict.pointer] = true
+	} else if p.typename == "filter" {
+		// Parent filter not placed yet: place it first, then this one below it
+		pnew := pdict{
+			typename: JSONQuery.Filters[p.pointer].FromType,
+			pointer:  JSONQuery.Filters[p.pointer].FromID,
+		}
+		addOneFilter(p, JSONQuery, pnew, filterDone)
+		l := FindCurrentLayer(listoflists, p)
+		k := pdictList{filterPDict}
+		if len(listoflists) > l+1 && listoflists[l+1][0].typename == "filter" {
+			listoflists[l+1] = append(listoflists[l+1], filterPDict)
+		} else {
+			listoflists = BelowAppend(listoflists, l, k)
+		}
+		(*filterDone)[filterPDict.pointer] = true
+	} else {
+		// Attached to a non-filter pill: insert directly below its layer
+		l := FindCurrentLayer(listoflists, p)
+		k := pdictList{filterPDict}
+		if len(listoflists) > l+1 && listoflists[l+1][0].typename == "filter" {
+			listoflists[l+1] = append(listoflists[l+1], filterPDict)
+		} else {
+			listoflists = BelowAppend(listoflists, l, k)
+		}
+		(*filterDone)[filterPDict.pointer] = true
+	}
+}
+
+// A function that appends 1 level above (if index is 0 this won't work)
+// AboveAppend inserts the layer `value` one level above `index`; at index 0
+// it prepends a brand-new first layer instead.
+func AboveAppend(list []pdictList, index int, value pdictList) []pdictList {
+	if index == 0 {
+		return prepend(list, value)
+	}
+	return BelowAppend(list, index-1, value)
+}
+
+// A function that appends 1 level below
+// BelowAppend inserts the layer `value` directly after position `index`,
+// returning the new hierarchy. The head and tail are copied into fresh
+// slices before splicing to avoid clobbering the caller's backing array.
+func BelowAppend(lists []pdictList, index int, value pdictList) []pdictList {
+	if len(lists)-1 == index { // nil or empty slice or after last element
+		return append(lists, value)
+	}
+	k := make([]pdictList, len(lists[index+1:]))
+	copy(k, lists[index+1:])
+	l := make([]pdictList, len(lists[:index+1]))
+	copy(l, lists[:index+1])
+	lists = append(l, value) // index < len(a)
+	return append(lists, k...)
+}
+
+// FindCurrentLayer returns the index of the layer that contains the given
+// element, or -1 when it is absent. When an element occurs in several
+// layers the highest (last) layer index wins, matching the original
+// forward scan that kept overwriting its result without breaking.
+// Because we only append elements relative to another element, this is a
+// reliable way to keep track of layers.
+func FindCurrentLayer(list []pdictList, element pdict) int {
+	for layer := len(list) - 1; layer >= 0; layer-- {
+		for _, candidate := range list[layer] {
+			if candidate.pointer == element.pointer && candidate.typename == element.typename {
+				return layer
+			}
+		}
+	}
+	return -1
+}
+
+// Adds a list of pdicts to the hierarchy, but IN FRONT OF the current layer
+// Only needed when a entire new list has to be inserted in front of the hierarcy
+// Prepending to existing layers can be done by decreasing the layercounter and appending
+// See XToY functions for example usage
+func prepend(list []pdictList, element pdictList) []pdictList {
+	var dummylist pdictList
+	dummy := pdict{
+		typename: "dummy",
+		pointer:  -1,
+	}
+	dummylist = append(dummylist, dummy)
+	list = append(list, dummylist)
+	copy(list[1:], list)
+	list[0] = element
+	return list
+}
diff --git a/cypher/hierarchy_test.go b/cypher/hierarchy_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..83160c1cadbac0f3da53c274fe635c4219403a00
--- /dev/null
+++ b/cypher/hierarchy_test.go
@@ -0,0 +1,333 @@
+package cypher
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"testing"
+
+	"git.science.uu.nl/graphpolaris/query-conversion/entity"
+	"github.com/stretchr/testify/assert"
+)
+
+// TestHierarchyBasic checks that search builds the expected hierarchy for a
+// simple two-entity, one-relation, two-filter query, starting from entity 0.
+func TestHierarchyBasic(t *testing.T) {
+	// Setup for test
+	// Create query conversion service
+	query := []byte(`{
+		"return": {
+			"entities": [
+				0,
+				1
+			],
+			"relations": [
+				0
+			],
+			"groupBys": []
+		},
+		"entities": [
+			{
+				"name": "parliament",
+				"ID": 0
+			},
+			{
+				"name": "parties",
+				"ID": 1
+			}
+		],
+		"relations": [
+			{
+				"ID": 0,
+				"name": "member_of",
+				"depth": {
+					"min": 1,
+					"max": 1
+				},
+				"fromType": "entity",
+				"fromID": 0,
+				"toType": "entity",
+				"toID": 1
+			}
+		],
+		"groupBys": [],
+		"filters": [
+			{
+				"ID": 0,
+				"fromType": "entity",
+				"fromID": 0,
+				"toType": "relation",
+				"toID": 0,
+				"attribute": "age",
+				"value": "45",
+				"dataType": "number",
+				"matchType": "GT",
+				"inType": "",
+				"inID": -1
+			},
+			{
+				"ID": 1,
+				"fromType": "relation",
+				"fromID": 0,
+				"toType": "relation",
+				"toID": 1,
+				"attribute": "isChairman",
+				"value": "45",
+				"dataType": "number",
+				"matchType": "GT",
+				"inType": "",
+				"inID": -1
+			}
+		],
+		"limit": 5000
+	}
+	`)
+
+	// Unmarshall the incoming message into an IncomingJSONQuery object
+	var JSONQuery entity.IncomingQueryJSON
+	if err := json.Unmarshal(query, &JSONQuery); err != nil {
+		t.Fatalf("could not unmarshal test query: %v", err)
+	}
+	search(&JSONQuery, 0)
+
+	// Assert that the result and the expected result are the same
+	// (the stray unconditional t.Fail() debug statement has been removed,
+	// since it made the test fail even when the assertion passed)
+	correctResult := `[[{entity 0} {entity 1}] [{filter 0}] [{relation 0}] [{filter 1}]]`
+	assert.Equal(t, correctResult, fmt.Sprint(listoflists))
+}
+
+// TestHierarchyRandomStart verifies that search produces the same hierarchy
+// no matter which entity it starts from; each layer is sorted before
+// comparison so element order within a layer does not matter.
+func TestHierarchyRandomStart(t *testing.T) {
+	// Setup for test
+	// Create query conversion service
+	query := []byte(`{
+		"return": {
+			"entities": [
+				0,
+				1
+			],
+			"relations": [
+				0
+			],
+			"groupBys": []
+		},
+		"entities": [
+			{
+				"name": "parties",
+				"ID": 1
+			},
+			
+			{
+				"name": "parliament",
+				"ID": 0
+			}
+		],
+		"relations": [
+			{
+				"ID": 0,
+				"name": "member_of",
+				"depth": {
+					"min": 1,
+					"max": 1
+				},
+				"fromType": "entity",
+				"fromID": 0,
+				"toType": "entity",
+				"toID": 1
+			}
+		],
+		"groupBys": [],
+		"filters": [
+			{
+				"ID": 0,
+				"fromType": "entity",
+				"fromID": 0,
+				"toType": "relation",
+				"toID": 0,
+				"attribute": "age",
+				"value": "45",
+				"dataType": "number",
+				"matchType": "GT",
+				"inType": "",
+				"inID": -1
+			},
+			{
+				"ID": 1,
+				"fromType": "relation",
+				"fromID": 0,
+				"toType": "relation",
+				"toID": 1,
+				"attribute": "isChairman",
+				"value": "45",
+				"dataType": "number",
+				"matchType": "GT",
+				"inType": "",
+				"inID": -1
+			}
+		],
+		"limit": 5000
+	}
+	`)
+
+	// Unmarshall the incoming message into an IncomingJSONQuery object
+	var JSONQuery entity.IncomingQueryJSON
+	if err := json.Unmarshal(query, &JSONQuery); err != nil {
+		t.Fatalf("could not unmarshal test query: %v", err)
+	}
+	correctResult := make([]pdictList, 4)
+	correctResult[0] = pdictList{{typename: "entity", pointer: 0}, {typename: "entity", pointer: 1}}
+	correctResult[1] = pdictList{{typename: "filter", pointer: 0}}
+	correctResult[2] = pdictList{{typename: "relation", pointer: 0}}
+	correctResult[3] = pdictList{{typename: "filter", pointer: 1}}
+
+	for i := range JSONQuery.Entities {
+		search(&JSONQuery, i)
+		// Sort each layer so the comparison is independent of insertion order.
+		// (inner index renamed to j to avoid shadowing the outer i)
+		sortedListOfLists := make([]pdictList, len(listoflists))
+		for j, list := range listoflists {
+			k := make(pdictList, list.Len())
+			copy(k, list)
+			sort.Sort(k)
+			sortedListOfLists[j] = k
+		}
+		assert.Equal(t, fmt.Sprint(correctResult), fmt.Sprint(sortedListOfLists))
+	}
+
+}
+
+// TestHierarchyWithGroupby verifies the hierarchy for a query containing a
+// group-by: the part above the group-by must land in later layers than the
+// part it aggregates. Every entity is tried as the starting point.
+func TestHierarchyWithGroupby(t *testing.T) {
+	// Setup for test
+	// Create query conversion service
+	query := []byte(`{
+		"return": {
+			"entities": [
+				0,
+				1,
+				2,
+				3
+			],
+			"relations": [
+				0,
+				1,
+				2
+			]
+		},
+		"entities": [
+			{
+				"ID": 0,
+				"name": "parliament"
+			},
+			{
+				"ID": 1,
+				"name": "commissions"
+			},
+			{
+				"ID": 2,
+				"name": "parliament"
+			},
+			{
+				"ID": 3,
+				"name": "resolutions"
+			}
+		],
+		"relations": [
+			{
+				"type": "part_of",
+				"depth": {
+					"min": 1,
+					"max": 1
+				},
+				"fromType": "entity",
+				"fromId": 0,
+				"toType": "entity",
+				"toID": 1
+	   
+			},
+			{
+				"type": "part_of",
+				"depth": {
+					"min": 1,
+					"max": 1
+				},
+				"fromType": "groupBy",
+				"fromID": 0,
+				"toType": "entity",
+				"toID": 2
+	   
+			},
+			{
+				"type": "submits",
+				"depth": {
+					"min": 1,
+					"max": 1
+				},
+				"fromType": "entity",
+				"fromID": 2,
+				"toType": "entity",
+				"toID": 3
+			}
+		],
+		"groupBys": [
+			{
+				"ID": 0,
+				"groupType": "entity",
+				"groupID": 0,
+				"groupAttribute": "age",
+				"byType": "entity",
+				"byID": 1,
+				"byAttribute": "name",
+				"appliedModifier": "AVG",
+				"relationID": 0,
+				"constraints": [
+					{
+						"attribute": "age",
+						"value": "45",
+						"dataType": "number",
+						"matchType": "GT",
+						"functionPointer": {
+							"from": -1,
+							"to": -1
+						}
+			   
+					}
+				]
+			}
+		],
+		"filters": [
+			{
+				"ID": 0,
+				"fromType": "groupBy",
+				"fromID": 0,
+				"toType": "relation",
+				"toID": 1,
+				"attribute": "age",
+				"value": "45",
+				"dataType": "number",
+				"matchType": "GT",
+				"inType": "",
+				"inID": -1
+			}
+		],
+		"limit": 5000,
+		"modifiers": [],
+		"databaseName": "TweedeKamer"
+	}
+	`)
+
+	// Unmarshall the incoming message into an IncomingJSONQuery object
+	var JSONQuery entity.IncomingQueryJSON
+	if err := json.Unmarshal(query, &JSONQuery); err != nil {
+		t.Fatalf("could not unmarshal test query: %v", err)
+	}
+	correctResult := make([]pdictList, 5)
+	correctResult[0] = pdictList{{typename: "entity", pointer: 0}, {typename: "entity", pointer: 1}}
+	correctResult[1] = pdictList{{typename: "relation", pointer: 0}}
+	correctResult[2] = pdictList{{typename: "entity", pointer: 2}, {typename: "entity", pointer: 3}, {typename: "groupBy", pointer: 0}}
+	correctResult[3] = pdictList{{typename: "filter", pointer: 0}}
+	correctResult[4] = pdictList{{typename: "relation", pointer: 1}, {typename: "relation", pointer: 2}}
+
+	for i := range JSONQuery.Entities {
+		search(&JSONQuery, i)
+
+		// Sort each layer so the comparison is independent of insertion order.
+		// (debug fmt.Println removed; inner index renamed to j to avoid
+		// shadowing the outer i)
+		sortedListOfLists := make([]pdictList, len(listoflists))
+		for j, list := range listoflists {
+			k := make(pdictList, list.Len())
+			copy(k, list)
+			sort.Sort(k)
+			sortedListOfLists[j] = k
+		}
+		assert.Equal(t, fmt.Sprint(correctResult), fmt.Sprint(sortedListOfLists))
+	}
+
+}
diff --git a/entity/queryStruct.go b/entity/queryStruct.go
index 7b6c2760ac2ad7c0b1a002c516559011a85cdd5d..f8ae93c76d72ecfc19f2eefe6536afc3720a67f6 100644
--- a/entity/queryStruct.go
+++ b/entity/queryStruct.go
@@ -6,6 +6,8 @@ type IncomingQueryJSON struct {
 	Return       QueryReturnStruct
 	Entities     []QueryEntityStruct
 	Relations    []QueryRelationStruct
+	GroupBys     []QueryGroupByStruct
+	Filters      []QueryFilterStruct
 	// Limit is for limiting the amount of paths AQL will return in a relation let statement
 	Limit     int
 	Modifiers []QueryModifierStruct
@@ -15,22 +17,51 @@ type IncomingQueryJSON struct {
 type QueryReturnStruct struct {
 	Entities  []int
 	Relations []int
+	GroupBys  []int
 	//Modifiers []int
 }
 
 // QueryEntityStruct encapsulates a single entity with its corresponding constraints
 type QueryEntityStruct struct {
-	Type        string
-	Constraints []QueryConstraintStruct
+	ID   int
+	Name string
 }
 
 // QueryRelationStruct encapsulates a single relation with its corresponding constraints
 type QueryRelationStruct struct {
-	Type        string
-	EntityFrom  int
-	EntityTo    int
-	Depth       QuerySearchDepthStruct
-	Constraints []QueryConstraintStruct
+	ID       int
+	Name     string
+	FromType string
+	FromID   int
+	ToType   string
+	ToID     int
+	Depth    QuerySearchDepthStruct
+}
+
+// QueryGroupByStruct encapsulates a single group-by: which element is
+// grouped, which element it is grouped by, the attributes involved, and the
+// modifier (e.g. "AVG") applied to the grouped attribute.
+type QueryGroupByStruct struct {
+	ID              int
+	GroupType       string // type of the element being grouped, e.g. "entity"
+	GroupID         int    // ID of the element being grouped
+	GroupAttribute  string // attribute of the grouped element that the modifier operates on
+	ByType          string // type of the element grouped by
+	ByID            int    // ID of the element grouped by
+	ByAttribute     string // attribute of the by-element used as grouping key
+	AppliedModifier string // aggregation modifier, e.g. "AVG"
+	RelationID      int    // presumably the relation linking the group- and by-elements — TODO confirm
+}
+
+// QueryFilterStruct describes a single filter: the element it attaches
+// between (From/To) and the constraint (attribute, match type, value)
+// that it applies.
+type QueryFilterStruct struct {
+	ID        int
+	FromType  string // type of the source element, e.g. "entity", "relation" or "groupBy"
+	FromID    int    // ID of the source element
+	ToType    string // type of the target element
+	ToID      int    // ID of the target element
+	Attribute string // attribute being constrained
+	DataType  string // datatype of Value, e.g. "number"
+	MatchType string // comparison, e.g. "GT"
+	Value     string // comparison value; transported as a string even for numbers
+	InType    string // assumed optional "in" scope type; "" when unused — TODO confirm
+	InID      int    // assumed optional "in" scope ID; -1 when unused — TODO confirm
+}
 
 // QueryModifierStruct encapsulates a single modifier with its corresponding constraints
@@ -50,11 +81,5 @@ type QuerySearchDepthStruct struct {
 // QueryConstraintStruct holds the information of the constraint
 // Constraint datatypes
 // 	string     MatchTypes: exact/contains/startswith/endswith
-// 	int   MatchTypes: GT/LT/EQ
+// 	int   MatchTypes: GT/LT/EQ
 // 	bool     MatchTypes: EQ/NEQ
-type QueryConstraintStruct struct {
-	Attribute string
-	Value     string
-	DataType  string
-	MatchType string
-}