###########################################################################################################
# Mortality Modeling
###########################################################################################################
#
# Licensed under the Apache License, Version 2.0
# You may not use this file except in compliance with the License. You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and limitations under the License.
# -> Authors:
# Luis R Soenksen (<[email protected]>),
# Yu Ma (<[email protected]>),
# Cynthia Zeng (<[email protected]>),
# Leonard David Jean Boussioux (<[email protected]>),
# Kimberly M Villalobos Carballo (<[email protected]>),
# Liangyuan Na (<[email protected]>),
# Holly Mika Wiberg (<[email protected]>),
# Michael Lingzhi Li (<[email protected]>),
# Ignacio Fuentes (<[email protected]>),
# Dimitris J Bertsimas (<[email protected]>),
# -> Last Update: Dec 30th, 2021
import pandas as pd
from glob import glob
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import csv
import sys
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
import xgboost as xgb
import numpy as np
# Path to the per-image HAIM embedding file (a CSV of embedding features plus the metadata
# columns used below). The original left this as a placeholder (fname = fname); it is assumed
# here to be supplied as the first command-line argument.
fname = sys.argv[1]
df = pd.read_csv(fname)
# Define the 48-hour mortality label:
#   y = 1 -> the stay after the imaging study lasted < 48 h and ended in death
#   y = 0 -> the stay lasted >= 48 h (regardless of the eventual outcome)
df_death_small48 = df[(df['img_length_of_stay'] < 48) & (df['death_status'] == 1)].copy()
df_alive_big48 = df[(df['img_length_of_stay'] >= 48) & (df['death_status'] == 0)].copy()
df_death_big48 = df[(df['img_length_of_stay'] >= 48) & (df['death_status'] == 1)].copy()
df_death_small48['y'] = 1
df_alive_big48['y'] = 0
df_death_big48['y'] = 0
df = pd.concat([df_death_small48, df_alive_big48, df_death_big48], axis=0)
# Drop identifiers and outcome-leaking columns so only embedding features and the label remain
df = df.drop(['img_id', 'img_charttime', 'img_deltacharttime', 'discharge_location',
              'img_length_of_stay', 'death_status'], axis=1)
# The helper functions used below (get_data_dict, get_all_dtypes, data_fusion, run_models) are
# expected to come from the accompanying HAIM utilities; they are not defined in this script.
data_type_dict = get_data_dict(df)
all_types_experiment = get_all_dtypes()
# Index of the (data-type combination, model) pair to run; passing it in externally makes it easy
# to parallelize all experiments across jobs. The original left this as a placeholder (ind = ind);
# it is assumed here to be supplied as the second command-line argument, e.g.
#   python "2_2-Mortality Prediction Modeling.py" <embeddings.csv> <experiment_index>
ind = int(sys.argv[2])
data_type, model = all_types_experiment[ind]
run_models(data_fusion(data_type), data_type, model)
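# ------------------------------------------------------------------------------------------------
# Illustrative sketch (not the HAIM implementation): run_models and its companions live in the
# accompanying utilities and are not reproduced here. Given the imports above (train_test_split,
# GridSearchCV, metrics, xgboost), a minimal run_models-style routine for this binary 48-hour
# mortality label could look like the hypothetical function below.
def run_models_sketch(df_fused, data_type, model_name):
    """Hypothetical example: grid-searched XGBoost on the fused features, reporting test AUC."""
    X = df_fused.drop(columns=['y'])
    y = df_fused['y']
    # Stratified hold-out split to preserve the class balance of the mortality label
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, stratify=y, random_state=42)
    # Small hyperparameter grid, selected by cross-validated AUC
    param_grid = {'max_depth': [3, 5], 'n_estimators': [100, 300], 'learning_rate': [0.05, 0.1]}
    clf = GridSearchCV(xgb.XGBClassifier(eval_metric='logloss'),
                       param_grid, scoring='roc_auc', cv=5)
    clf.fit(X_train, y_train)
    # Report discrimination on the held-out set
    y_prob = clf.predict_proba(X_test)[:, 1]
    auc = metrics.roc_auc_score(y_test, y_prob)
    print(f'{data_type} / {model_name}: held-out AUC = {auc:.3f}')
    return clf, auc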