UNI-KLU / SMART / Commits / a878064e

Commit a878064e authored Jul 26, 2021 by Alexander Lercher
Correctly predicting with scaled metrics data
parent d94b70d7
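The underlying bug is a train/serve mismatch: run_training() fits the classifier on StandardScaler output, but run_prediction() fed it raw metrics. A minimal sketch of the corrected pattern, not taken from the commit — scikit-learn's SVC stands in for the actual estimator, which this diff does not show:

import pickle
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.default_rng(0)
X_train = rng.random((100, 8)) * 1000   # raw metrics on a large scale
y_train = rng.integers(0, 2, 100)

scaler = StandardScaler()
svc = SVC().fit(scaler.fit_transform(X_train), y_train)

# Persist both artifacts: a model fitted on scaled features is only
# usable together with the scaler that produced them.
with open('layer.model', 'wb') as f:
    pickle.dump(svc, f)
with open('layer_scaler.model', 'wb') as f:
    pickle.dump(scaler, f)

x_new = rng.random((1, 8)) * 1000
# Before this commit: raw features went straight into predict(), far
# outside the distribution the classifier was fitted on.
# After: the persisted scaler is applied first.
print(svc.predict(scaler.transform(x_new)))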
Showing 3 changed files with 268 additions and 368 deletions (+268 −368):

predict.ipynb (...active-community-detection-microservice/app/predict.ipynb): +240 −357
predict_single_context.py (...-microservice/app/processing/ml/predict_single_context.py): +22 −7
train_single_context.py (...on-microservice/app/processing/ml/train_single_context.py): +6 −4
src/data-hub/proactive-community-detection-microservice/app/predict.ipynb @ a878064e

(This diff is collapsed.)
src/data-hub/proactive-community-detection-microservice/app/processing/ml/predict_single_context.py @ a878064e
@@ -17,7 +17,8 @@ from typing import Dict
 from typing import Tuple

 def get_metrics(cur_cluster: Cluster) -> Tuple:
     return (cur_cluster.size, cur_cluster.std_dev, cur_cluster.scarcity, cur_cluster.importance1, cur_cluster.importance2, cur_cluster.range_, cur_cluster.global_center_distance, get_cyclic_time_feature(cur_cluster.get_time_info()))
 ####################
 import pickle
 #####################
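get_cyclic_time_feature() itself is not part of this diff; a plausible sketch of such a helper, assuming it encodes a periodic time index (e.g. the week number returned by get_time_info()) as a sin/cos pair so the ends of the cycle stay close in feature space:

import numpy as np

def get_cyclic_time_feature_sketch(time_idx: int, period: int = 52):
    # Project the index onto the unit circle: week 52 and week 1 end up
    # adjacent, unlike with a raw integer encoding. (Hypothetical; the
    # repo's actual implementation is not shown in this commit.)
    angle = 2 * np.pi * (time_idx % period) / period
    return (np.sin(angle), np.cos(angle))

print(get_cyclic_time_feature_sketch(1), get_cyclic_time_feature_sketch(52))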
@@ -53,9 +54,8 @@ repo = Repository()
 def run_prediction(use_case: str):
     for layer in repo.get_layers_for_use_case(use_case):
         layer_name = layer.layer_name
-        ################
-        df: DataFrame = pd.read_csv(f'data/{use_case}/ml_input/single_context/{layer_name}.csv', index_col=0)
+        print(f"Predicting {method} for {use_case}//{layer_name}")
 #################
         path_in = f"data/{use_case}/cluster_metrics/{layer_name}.json"
         with open(path_in, 'r') as file:
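The input now comes from the per-layer cluster-metrics JSON rather than the flattened CSV. How cluster_map is built from that file falls outside this hunk; a hypothetical sketch, assuming the JSON holds one metrics record per cluster and time window (field names are assumptions, not from the repo):

from collections import defaultdict

# Hypothetical shape of the cluster-metrics JSON.
records = [
    {'cluster_id': 'c1', 'time_window_id': '(2021, 28)', 'size': 5, 'std_dev': 0.4},
    {'cluster_id': 'c1', 'time_window_id': '(2021, 29)', 'size': 7, 'std_dev': 0.3},
    {'cluster_id': 'c2', 'time_window_id': '(2021, 29)', 'size': 2, 'std_dev': 1.1},
]

# Group into cluster_id -> chronologically ordered time windows, the shape
# that run_prediction() iterates below.
cluster_map = defaultdict(list)
for rec in sorted(records, key=lambda r: r['time_window_id']):
    cluster_map[rec['cluster_id']].append(rec)
print(dict(cluster_map))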
@@ -75,12 +75,27 @@ def run_prediction(use_case: str):
 ####################
         with open(f'data/{use_case}/ml_output/{method}/{layer_name}.model', 'rb') as file:
             svc = pickle.load(file)
-        ####################
+        with open(f'data/{use_case}/ml_output/{method}/{layer_name}_scaler.model', 'rb') as file:
+            scaler = pickle.load(file)
 #####################
+        # store id, future time window, and flattened metrics to combine the latter during prediction
+        prediction_cluster_ids = []
+        prediction_time_windows = []
+        prediction_metrics = []
         for cluster_id, time_windows in cluster_map.items():
             v = [get_metrics(c) for c in time_windows[-N:]]  # metrics for last N time windows
             v_flattened = flatten_metrics_datapoint(v)
-            v_flattened = v_flattened.reshape(1, v_flattened.shape[0])  # reshape for ML with only 1 pred value
-            res = PredictionResult(use_case, use_case, method, layer_name, None, cluster_id, increase_time_window(time_windows[-1].time_window_id), svc.predict(v_flattened)[0])
-            repo.add_prediction_result(res)
-        #####################
+            prediction_cluster_ids.append(cluster_id)
+            prediction_time_windows.append(increase_time_window(time_windows[-1].time_window_id))
+            prediction_metrics.append(v_flattened)
+        # predict all at once for speedup
+        prediction_results = svc.predict(scaler.transform(np.array(prediction_metrics)))
+        print(np.unique(prediction_results, return_counts=True))
+        for i in range(len(prediction_cluster_ids)):
+            res = PredictionResult(use_case, use_case, method, layer_name, None, prediction_cluster_ids[i], prediction_time_windows[i], prediction_results[i])
+            repo.add_prediction_result(res)
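Besides applying the scaler, this hunk trades one predict() call per cluster for a single vectorized call over all clusters of a layer. A self-contained sketch of that batching pattern (data, cluster ids, and the SVC estimator are illustrative, not from the repo):

import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

rng = np.random.default_rng(0)
train = rng.normal(size=(200, 6))
labels = rng.integers(0, 3, 200)
scaler = StandardScaler().fit(train)
svc = SVC().fit(scaler.transform(train), labels)

# Collect one flattened metrics row per cluster inside the loop ...
cluster_ids = ['c1', 'c2', 'c3']
metrics_rows = [rng.normal(size=6) for _ in cluster_ids]

# ... then scale and predict the whole batch in a single call.
results = svc.predict(scaler.transform(np.array(metrics_rows)))
print(np.unique(results, return_counts=True))
for cid, pred in zip(cluster_ids, results):
    print(cid, pred)  # persisted per cluster, as repo.add_prediction_result does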
src/data-hub/proactive-community-detection-microservice/app/processing/ml/train_single_context.py @ a878064e
@@ -8,10 +8,10 @@ approach = 'single_context'
 import pickle
 from pathlib import Path

-def export_model(model, use_case, layer_name):
+def export_model(model, use_case, layer_name, scaler=False):
     fpath = f'data/{use_case}/ml_output/{approach}'
     Path(fpath).mkdir(parents=True, exist_ok=True)
-    with open(f'{fpath}/{layer_name}.model', 'wb') as f:
+    with open(f'{fpath}/{layer_name}{"_scaler" if scaler else ""}.model', 'wb') as f:
         pickle.dump(model, f)
 #####################
 from sklearn.ensemble import RandomForestClassifier
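With the new scaler flag, the same helper persists both artifacts side by side, which the second hunk below relies on (the classifier variable name here is illustrative):

# Usage as in run_training(); writes
#   data/{use_case}/ml_output/single_context/{layer_name}.model
#   data/{use_case}/ml_output/single_context/{layer_name}_scaler.model
export_model(classifier, use_case, layer_name)
export_model(scaler, use_case, layer_name, scaler=True)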
@@ -45,11 +45,13 @@ def run_training(use_case):
     from sklearn.preprocessing import StandardScaler
     scaler = StandardScaler()
-    train_X = scaler.fit_transform(training)[:,:-1]  # all except y
+    train_X = scaler.fit_transform(training[training.columns[:-1]])  # all except y
     train_Y = training[training.columns[-1]]
-    test_X = scaler.transform(testing)[:,:-1]  # all except y
+    test_X = scaler.transform(testing[testing.columns[:-1]])  # all except y
     test_Y = testing[testing.columns[-1]]
+    export_model(scaler, use_case, layer_name, scaler=True)
 ########################
 from processing import DataSampler
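Note the subtle second fix in this hunk: the old code fitted the scaler on the whole DataFrame, label column included, and only sliced the label off afterwards. Prediction-time rows carry no label, so transform() would have been handed the wrong number of features. A short demonstration (column names are illustrative):

import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler

training = pd.DataFrame({'f1': [1.0, 2.0, 3.0],
                         'f2': [10.0, 20.0, 30.0],
                         'y':  [0, 1, 0]})

# Old behaviour: the scaler is fitted on 3 columns (y included) and expects
# 3 columns ever after, but prediction rows only have the 2 feature columns:
# scaler = StandardScaler()
# train_X = scaler.fit_transform(training)[:, :-1]
# scaler.transform(np.array([[2.5, 25.0]]))  # ValueError: 2 features, expected 3

# Fixed behaviour: fit on the feature columns only.
scaler = StandardScaler()
train_X = scaler.fit_transform(training[training.columns[:-1]])
print(scaler.transform(np.array([[2.5, 25.0]])))  # works: exactly 2 features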