add rule generation from input json
This commit is contained in:
parent
ebe1e20dab
commit
b6a739429c
27
example_input.json
Normal file
27
example_input.json
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
[{"id": 1, "group_id": 11, "timetable_id": 3, "sorting": 0},
|
||||||
|
{"id": 2, "group_id": 11, "timetable_id": 2, "sorting": 1},
|
||||||
|
{"id": 3, "group_id": 11, "timetable_id": 10, "sorting": 2},
|
||||||
|
{"id": 4, "group_id": 11, "timetable_id": 6, "sorting": 3},
|
||||||
|
{"id": 5, "group_id": 11, "timetable_id": 11, "sorting": 4},
|
||||||
|
{"id": 6, "group_id": 11, "timetable_id": 4, "sorting": 5},
|
||||||
|
{"id": 7, "group_id": 11, "timetable_id": 8, "sorting": 6},
|
||||||
|
{"id": 8, "group_id": 11, "timetable_id": 5, "sorting": 7},
|
||||||
|
{"id": 9, "group_id": 11, "timetable_id": 9, "sorting": 8},
|
||||||
|
{"id": 10, "group_id": 11, "timetable_id": 1, "sorting": 9},
|
||||||
|
{"id": 11, "group_id": 11, "timetable_id": 12, "sorting": 10},
|
||||||
|
{"id": 12, "group_id": 11, "timetable_id": 7, "sorting": 11},
|
||||||
|
{"id": 13, "group_id": 12, "timetable_id": 14, "sorting": 0},
|
||||||
|
{"id": 14, "group_id": 12, "timetable_id": 15, "sorting": 1},
|
||||||
|
{"id": 15, "group_id": 12, "timetable_id": 13, "sorting": 2},
|
||||||
|
{"id": 16, "group_id": 13, "timetable_id": 1, "sorting": 0},
|
||||||
|
{"id": 17, "group_id": 13, "timetable_id": 3, "sorting": 1},
|
||||||
|
{"id": 18, "group_id": 13, "timetable_id": 5, "sorting": 2},
|
||||||
|
{"id": 19, "group_id": 13, "timetable_id": 4, "sorting": 3},
|
||||||
|
{"id": 20, "group_id": 13, "timetable_id": 6, "sorting": 4},
|
||||||
|
{"id": 21, "group_id": 13, "timetable_id": 10, "sorting": 5},
|
||||||
|
{"id": 22, "group_id": 13, "timetable_id": 7, "sorting": 6},
|
||||||
|
{"id": 23, "group_id": 13, "timetable_id": 12, "sorting": 7},
|
||||||
|
{"id": 24, "group_id": 13, "timetable_id": 2, "sorting": 8},
|
||||||
|
{"id": 25, "group_id": 13, "timetable_id": 9, "sorting": 9},
|
||||||
|
{"id": 26, "group_id": 13, "timetable_id": 8, "sorting": 10},
|
||||||
|
{"id": 27, "group_id": 13, "timetable_id": 11, "sorting": 11}]
|
116
generate.py
Normal file
116
generate.py
Normal file
|
@ -0,0 +1,116 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
|
|
||||||
|
import random
|
||||||
|
import math
|
||||||
|
import sys
|
||||||
|
import subprocess
|
||||||
|
import json
|
||||||
|
from pprint import pprint as pprint
|
||||||
|
|
||||||
|
# Print usage and exit when explicitly asked — or when no argument was given,
# which would otherwise raise IndexError on the sys.argv[1] access itself.
if len(sys.argv) < 2 or sys.argv[1] == "help":
    print(f"{sys.argv[0]} [input_json_file] [maximum_number of students per group]\n {sys.argv[0]} foo.json 12")
    sys.exit(0)
|
||||||
|
|
||||||
|
def generate_random_set(f, s, g):
|
||||||
|
for i in range(1,s):
|
||||||
|
f.write(f"schedule('sched{i}').\n")
|
||||||
|
|
||||||
|
for j in range(1,g):
|
||||||
|
r = math.floor(random.gauss(4, 2))
|
||||||
|
f.write(f"group('group{j}', {r}).\n")
|
||||||
|
|
||||||
|
for group in range(1,g):
|
||||||
|
for sched in range(1,s):
|
||||||
|
rank = random.randrange(s)
|
||||||
|
if rank == 0:
|
||||||
|
break
|
||||||
|
f.write(f"rank('group{group}', 'sched{sched}', {rank}).\n")
|
||||||
|
|
||||||
|
def parse_counts(counts):
    """Parse clingo ``count(...)`` atoms into a {group: count} dict.

    Each atom is stripped of its 6-char ``count(`` prefix and 2-char tail,
    then split once on ``,'`` — left part becomes the key, right the value.
    """
    parsed = {}
    for atom in counts:
        inner = atom[6:-2]
        group_name, count_value = inner.split(",'")
        parsed[group_name] = count_value
    return parsed
|
||||||
|
|
||||||
|
|
||||||
|
def parse_matchings(matchings):
    """Parse clingo ``matching(...)`` atoms into a {group: schedule} dict.

    Each atom loses its 10-char ``matching('`` prefix and 2-char tail, then
    the remainder is split once on ``','`` into (group, schedule).
    """
    pairs = (atom[10:-2].split("','") for atom in matchings)
    return {group_name: sched_name for group_name, sched_name in pairs}
|
||||||
|
|
||||||
|
|
||||||
|
class Rules():
    """Builds clingo facts (schedule/group/rank) from a ranking JSON export."""

    def __init__(self, json_path):
        # Timetable ids registered via addTimetable().
        self.timetables = []
        # group_id -> number of ranking rows seen for that group.
        self.groups = {}
        # (group_id, timetable_id, sorting) triples, in input-file order.
        self.rankings = []

        with open(json_path, 'r') as f:
            data = json.load(f)

        for entry in data:
            gid = entry["group_id"]
            self.groups[gid] = self.groups.get(gid, 0) + 1
            self.rankings.append((gid, entry["timetable_id"], entry["sorting"]))

    def addTimetable(self, timtableName):
        """Register one timetable id to be emitted as a schedule fact."""
        self.timetables.append(timtableName)

    def generateRules(self):
        """Render all collected facts as one logic-program source string."""
        facts = [f"schedule('sched{t}').\n" for t in self.timetables]
        facts += [f"group('group{gid}', {cnt}).\n"
                  for gid, cnt in self.groups.items()]
        facts += [f"rank('group{rk[0]}', 'sched{rk[1]}', {rk[2]}).\n"
                  for rk in self.rankings]
        return "".join(facts)
|
||||||
|
|
||||||
|
# Build the rule set from the input JSON; timetable ids 1..12 are hard-coded
# to match the ids appearing in the example input data.
rules = Rules(sys.argv[1])
for i in range(1, 13):
    rules.addTimetable(i)

# Maximum number of students per group, substituted into the base rules file.
# (Named max_students rather than `max` to avoid shadowing the builtin.)
max_students = sys.argv[2]

# Compose the solver input: the static base rules with {max} substituted,
# followed by the generated facts. `with` guarantees the handles are closed
# even if a write fails (the originals were never closed on error paths).
with open('rules.pl', 'r') as base_file:
    base = base_file.read()
with open('.rules.pl', 'w') as rules_file:
    rules_file.write(base.replace("{max}", max_students))
    rules_file.write(rules.generateRules())

# create subprocess definition
process = ['clingo', ".rules.pl", '--outf=2', '-n 3', '-t 3', '--configuration=tweety'] # output json, n number of schedules, t threads
# NOTE(review): '-n 3' and '-t 3' are each passed as a single argv entry
# containing a space — confirm clingo accepts this form (vs. '-n', '3').

# run the solver and capture its JSON output; subprocess.run blocks until
# completion (no busy-waiting involved).
completed_process = subprocess.run(process, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)

output = completed_process.stdout

# Keep a copy of the raw solver output for debugging.
with open('solver_output_simple.json', 'w') as out_file:
    out_file.write(output)

results = json.loads(output)

# Collect count(...) and matching(...) atoms from the last witness of each
# solver call. NOTE(review): assumes every call has at least one witness
# (i.e. the problem is satisfiable) — confirm, else this raises on UNSAT.
matchings = []
counts = []
for r in results["Call"]:
    w = r["Witnesses"][-1]
    for m in w["Value"]:
        if 'count' in m:
            counts.append(m)
        if 'matching' in m:
            matchings.append(m)

matchings = parse_matchings(matchings)
counts = parse_counts(counts)
pprint(counts)
|
Loading…
Reference in a new issue