added tv2news strategy
commit d6b192cc52
@@ -21,6 +21,7 @@ strategies = {
     "steam": (1.0, steam.probability),
     "australia": (1.0, miloStrats.australiaStrat),
     "camera": (1.0, miloStrats.camImgStrat),
+    "tv2news": (1.0, miloStrats.tv2newsStrat)
 }
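For context: each entry registered above pairs a weight with a strategy callable that maps a Context to a Prediction. How the app combines the strategies is not part of this diff; the sketch below is only one plausible weighted-average reading of the registry, not the project's actual aggregation code.

```python
# Hypothetical consumer of the registry above -- not part of this commit.
# Assumes every strategy takes a Context and returns a Prediction whose
# .probability lies in [0, 1].
def combined_probability(strategies, context):
    weighted_sum = 0.0
    total_weight = 0.0
    for weight, strategy in strategies.values():
        prediction = strategy(context)   # e.g. miloStrats.tv2newsStrat(context)
        weighted_sum += weight * prediction.probability
        total_weight += weight
    return weighted_sum / total_weight if total_weight else 0.0
```

The hunks that follow are the corresponding changes to the miloStrats module referenced here.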
@@ -1,9 +1,11 @@
 from datetime import datetime
+import requests

 import cv2
 from pytz import timezone

 from ..util import Context, Prediction
+#from server.nightr.util import Context, Prediction


 def camImgStrat(context : Context) -> Prediction:
@@ -25,16 +27,37 @@ def camImgStrat(context : Context) -> Prediction:

 def australiaStrat(context : Context) -> Prediction:
     """
-    Time in Australia
+    Using time in Australia
     """
     australia = timezone('Australia/Melbourne')
     t = datetime.now().astimezone(australia)
     hour = t.hour
     p = Prediction()
     if hour > 22 or hour < 6:
-        p.probability = 1.0
-        p.reasons.append('It\'s day-time in Australia')
-    else:
         p.probability = 0.0
         p.reasons.append('It\'s night-time in Australia')
+    else:
+        p.probability = 1.0
+        p.reasons.append('It\'s day-time in Australia')
     return p
+
+
+def tv2newsStrat(context : Context) -> Prediction:
+    r = requests.get('http://mpx.services.tv2.dk/api/latest')
+    data = r.json()
+    publish_dates = [(x['pubDate'])//1000 for x in data][:10]
+    delta_times = []
+    for i in range(len(publish_dates)):
+        if i == 0 : continue
+        delta_times.append(publish_dates[i-1] - publish_dates[i])
+
+    avg_delta = 0
+    for d in delta_times:
+        avg_delta += d
+    avg_timestamp = avg_delta // len(delta_times) // 60
+    p = Prediction()
+    print('average time between articles on tv2:', avg_timestamp, 'minutes')
+    p.probability = 1.0 if avg_timestamp > 50 else 0.0
+    p.reasons.append('There were ' + ('few' if avg_timestamp > 50 else 'many') + ' recent articles on TV2 News')
+    print(p.reasons[0])
+    return p
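tv2newsStrat infers night-time from how sparsely TV2 has published lately: it takes the ten newest article timestamps (pubDate appears to be a Unix timestamp in milliseconds), averages the gaps between consecutive articles, and maps an average above 50 minutes to probability 1.0. A slightly more defensive version of that gap computation is sketched below for illustration only; the timeout and the empty-feed guard are assumptions, not part of the commit.

```python
import requests

def average_gap_minutes(url='http://mpx.services.tv2.dk/api/latest', count=10):
    """Average gap in whole minutes between the newest `count` TV2 articles."""
    articles = requests.get(url, timeout=5).json()
    # pubDate is assumed to be Unix milliseconds, newest article first.
    stamps = [article['pubDate'] // 1000 for article in articles][:count]
    gaps = [newer - older for newer, older in zip(stamps, stamps[1:])]
    if not gaps:               # fewer than two articles: nothing to average
        return None
    return sum(gaps) // len(gaps) // 60
```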
server/nightr/strategies/parking_aarhus_1430.json (new file, 1 line)
@@ -0,0 +1 @@
{"help": "https://portal.opendata.dk/api/3/action/help_show?name=datastore_search", "success": true, "result": {"include_total": true, "resource_id": "2a82a145-0195-4081-a13c-b0e587e9b89c", "fields": [{"type": "int", "id": "_id"}, {"type": "text", "id": "date"}, {"type": "text", "id": "garageCode"}, {"type": "int4", "id": "totalSpaces"}, {"type": "int4", "id": "vehicleCount"}], "records_format": "objects", "records": [{"_id": 1, "date": "2019/04/06 14:30:01", "garageCode": "NORREPORT", "totalSpaces": 80, "vehicleCount": 61}, {"_id": 2, "date": "2019/04/06 14:30:01", "garageCode": "SCANDCENTER", "totalSpaces": 1240, "vehicleCount": 1033}, {"_id": 6, "date": "2019/04/06 14:30:01", "garageCode": "SALLING", "totalSpaces": 700, "vehicleCount": 575}, {"_id": 7, "date": "2019/04/06 14:30:01", "garageCode": "DOKK1", "totalSpaces": 1000, "vehicleCount": 0}, {"_id": 8, "date": "2019/04/06 14:30:01", "garageCode": "Navitas", "totalSpaces": 449, "vehicleCount": 208}, {"_id": 9, "date": "2019/04/06 14:30:01", "garageCode": "NewBusgadehuset", "totalSpaces": 105, "vehicleCount": 101}, {"_id": 3, "date": "2019/04/06 14:30:01", "garageCode": "BRUUNS", "totalSpaces": 953, "vehicleCount": 598}, {"_id": 4, "date": "2019/04/06 14:30:01", "garageCode": "MAGASIN", "totalSpaces": 378, "vehicleCount": 361}, {"_id": 5, "date": "2019/04/06 14:30:01", "garageCode": "KALKVAERKSVEJ", "totalSpaces": 210, "vehicleCount": 278}, {"_id": 10, "date": "2019/04/06 14:30:01", "garageCode": "Urban Level 1", "totalSpaces": 319, "vehicleCount": 99}, {"_id": 11, "date": "2019/04/06 14:30:01", "garageCode": "Urban Level 2+3", "totalSpaces": 654, "vehicleCount": 170}], "_links": {"start": "/api/3/action/datastore_search?resource_id=2a82a145-0195-4081-a13c-b0e587e9b89c", "next": "/api/3/action/datastore_search?offset=100&resource_id=2a82a145-0195-4081-a13c-b0e587e9b89c"}, "total": 11}}
@@ -1,6 +1,7 @@
 import pandas as pd
 import urllib.request
+import json
+import requests

 def determine_month():
     ds = pd.read_excel(urllib.request.urlopen('https://sundogbaelt.dk/wp-content/uploads/2019/04/trafiktal-maaned.xls'))
@@ -12,3 +13,10 @@ def determine_month():
     last_year_total = sum(ds['Total'][amount_of_cur_year+1:amount_of_cur_year+13])

     return ((12/(last_year_total//cur_year_total))+1), cur_year_total, last_year_total
+
+
+
+def write_json(url, data_name, time):
+    r = requests.get(url)
+    with open(f"{data_name}_{time}.json", 'w') as f:
+        json.dump(r.json(), f)
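determine_month estimates the current month from how far this year's cumulative Storebælt traffic has come relative to last year's total, and write_json (imported below in svm_strat.py as server.nightr.strategies.strat_utils.write_json) is the helper that produced snapshots such as parking_aarhus_1430.json. A rough sketch of both, with invented traffic figures used purely to show the arithmetic:

```python
# Invented figures: 12,000,000 crossings last year, 3,000,000 so far this year.
last_year_total = 12_000_000
cur_year_total = 3_000_000
month_estimate = (12 / (last_year_total // cur_year_total)) + 1
print(month_estimate)   # 4.0 -> roughly April

# The helper writes e.g. parking_aarhus_1430.json when called like this:
# write_json("https://portal.opendata.dk/api/3/action/datastore_search"
#            "?resource_id=2a82a145-0195-4081-a13c-b0e587e9b89c",
#            "parking_aarhus", "1430")
```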
server/nightr/strategies/svm_strat.py (new file, 44 lines)
@@ -0,0 +1,44 @@
+from sklearn import svm
+from sklearn.externals import joblib
+import requests
+import glob
+import json
+import numpy as np
+
+
+from server.nightr.strategies.strat_utils import write_json
+
+
+def find_data(time):
+    write_json("https://portal.opendata.dk/api/3/action/datastore_search?resource_id=2a82a145-0195-4081-a13c-b0e587e9b89c", "parking_aarhus", time)
+
+def load_data():
+
+    X = []
+    Y = []
+
+    for filename in glob.glob("parking_aarhus*"):
+        p_class = '2330' in filename
+
+        with open(filename) as file:
+            data = json.load(file)
+
+        records = data['result']['records']
+        frequencies = [house['vehicleCount'] / house['totalSpaces'] for house in records]
+        X.append(frequencies)
+        Y.append(int(p_class))
+
+    return np.array(X), np.array(Y)
+
+def train():
+    X, Y = load_data()
+    classifier = svm.SVC(C=10, gamma=0.01, probability=True)
+    classifier.fit(X, Y)
+    joblib.dump(classifier, "nightness_classifier.pkl")
+
+def predict(X):
+    classifier = joblib.load("nightness_classifier.pkl")
+    prob = classifier.predict_proba(X)
+    return prob[0, 1]
+
+train()
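Taken together, svm_strat.py is a small pipeline: find_data saves labelled snapshots of the parking API (a filename containing '2330' marks a night sample), train fits an SVM on the per-garage occupancy vectors, and predict returns the probability of the night class; note that train() runs at import time. Below is a hedged sketch of how a caller might score the live occupancy; the wrapper function is an assumption, not part of the file.

```python
import requests
import numpy as np

from server.nightr.strategies import svm_strat   # importing also runs train()

def parking_night_probability():
    """Score the current per-garage occupancy with the trained classifier."""
    url = ("https://portal.opendata.dk/api/3/action/datastore_search"
           "?resource_id=2a82a145-0195-4081-a13c-b0e587e9b89c")
    records = requests.get(url).json()['result']['records']
    frequencies = [r['vehicleCount'] / r['totalSpaces'] for r in records]
    # predict_proba expects a 2-D array: one row per sample, and the columns
    # must line up with the garages seen during training.
    return svm_strat.predict(np.array([frequencies]))
```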