Commit 304d6f3f authored by Antoine Guillaume's avatar Antoine Guillaume

Adding subwindow approach support

parent e3496cd6
......@@ -31,7 +31,7 @@ from sklearn.preprocessing import MinMaxScaler
# In[3]:
#Base path, all necessary folders are supposed to be contained in this one.
base_path = r"!! REPLACE BY YOUR PATH !!"
base_path = r"/home/prof/guillaume/"
#Path to the life cycles CSV files.
dataset_path = base_path+r"datasets/"
......@@ -40,32 +40,36 @@ dataset_path = base_path+r"datasets/"
result_path = base_path+r"results/"
#If not None, CSV files containing data used by the TS-CHIEF java program will be output
TSCHIEF_path = dataset_path+r"TSCHIEF/"
#dataset_path+r"TSCHIEF/"
TSCHIEF_path = None
#If True, perform cross validation of all defined pipelines
do_cross_validation = True
#If not None, will output results of cross validation as a LaTeX file (csv results are still exported)
produce_latex = base_path+'results.tex'
#Number of days to consider at the end of the life cycles. Life cycles shorter than n_days+1 will be dropped (+1 for temporal alignment).
n_days = 21
#Separator used when producing csv outputs
csv_separator = ';'
#Resample frequency. T = minutes
resample_freq = '20T'
#Size of the predictive padding
predictive_padding_hours = 48
#Extend the infected interval to cover the restart process; setting it to 0 will introduce bias.
#Extend the infected interval to cover a possible restart process.
extended_infected_interval_hours = 24
#Size of the PAA transform output
size=1000
#If not None, will output results of cross validation as a LaTeX file (csv results are still exported)
produce_latex = base_path+'results'+str(n_days)+'J_'+resample_freq+'.tex'
#Separator used when producing csv outputs
csv_separator = ';'
#Number of cross validation splits
n_splits=10
# Number of processes to launch in parallel for cross validation of each pipeline.
# Set to None if you don't have the setup to allow such speedups.
n_cv_jobs=None
n_cv_jobs=-1
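#Note: -1 follows the scikit-learn/joblib convention and uses all available CPU cores;
#keep None on machines where running the cross validations in parallel would exhaust memory.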
if dataset_path is not None and not exists(dataset_path):
mkdir(dataset_path)
......@@ -116,11 +120,11 @@ def process_cycle(file_name, path, predictive_interval, infected_interval):
#Apply Predictive interval
date = pd.Timestamp((data.iloc[-1]['date'] - timedelta(hours=predictive_interval)))
data.drop(data[data['date'] >= date].index,axis=0,inplace=True)
data.reset_index(drop=True,inplace=True)
#Apply infected interval
if data.shape[0] > 0:
date = pd.Timestamp((data.iloc[0]['date'] + timedelta(hours=infected_interval)))
data.drop(data[data['date'] <= date].index,axis=0,inplace=True)
#Reset index
data.reset_index(drop=True,inplace=True)
if data.shape[0] > 0:
return (data, y)
......@@ -134,6 +138,26 @@ life_cycles = np.asarray([process_cycle(file_name, dataset_path,
predictive_padding_hours,
extended_infected_interval_hours) for file_name in file_list])
def last_X_days(data, y, X, min_data=0.33):
if (data.iloc[-1]['date'] - data.iloc[0]['date']) >= timedelta(days=X+1):
lim_date = pd.Timestamp(data.iloc[-1]['date'].date())
#Drop the last, incomplete day
data = data[data['date'] < lim_date]
#Keep only the last X complete days
date = pd.Timestamp((lim_date - timedelta(days=X)))
data = data.drop(data[data['date'] < date].index,axis=0)
if data.shape[0] > ((X*24*60)/int(resample_freq[0:2]))*min_data:
return data,y
else:
return None
else:
return None
life_cycles = np.asarray([last_X_days(x[0],x[1],n_days) for x in life_cycles if x is not None])
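#Sketch of the retention rule above (assuming n_days=21 and resample_freq='20T'): the last 21
#complete days of a cycle must contain more than 0.33 * (21*24*60)/20 ~= 499 rows of raw events
#to be kept; otherwise last_X_days returns None and the cycle is filtered out downstream.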
print('\nData Loaded')
# # Define data encoding functions
......@@ -142,7 +166,7 @@ print('\nData Loaded')
codes = []
for x in [data[0]['cod_evt'].unique() for data in life_cycles if data is not None]:
for x in [x[0]['cod_evt'].unique() for x in life_cycles if x is not None]:
codes.extend(x)
codes = np.unique(codes) #Unique event codes present in the data, in increasing order
......@@ -197,6 +221,7 @@ def get_R3_dict(codes, spacing=200):
return dict_codes
def get_R4_dict(codes):
codes = np.append(codes, ['-1'])
vals = np.arange(codes.shape[0])
np.random.shuffle(vals)
return {code : vals[i] for i,code in enumerate(codes)}
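#Illustration (hypothetical event codes): get_R4_dict(np.asarray(['A10','B20'])) could return
#{'A10': 2, 'B20': 0, '-1': 1}, i.e. a random permutation of ordinal values over the codes plus
#the '-1' filler code, which is later reused as the R4 fill value for empty resampled bins.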
......@@ -210,267 +235,308 @@ def apply_code_dict(df, code_dic, code_column='cod_evt'):
return df
# In[11]:
# # Define pipelines
# We now define the pipelines that we will use for cross-validation
# In[11]:
max_features=100
pipeline_dict = {}
#FLATTENED IMAGE
pipeline_dict.update({"PAA Gramian Flat RF":make_pipeline(Gramian_transform(flatten=True),
pipeline_dict.update({"Gramian Flat RF":make_pipeline(Gramian_transform(flatten=True),
Random_Forest())})
pipeline_dict.update({"PAA Recurrence Flat RF":make_pipeline(Recurrence_transform(flatten=True),
pipeline_dict.update({"Recurrence Flat RF":make_pipeline(Recurrence_transform(flatten=True),
Random_Forest())})
pipeline_dict.update({"PAA Gramian Flat SVM":make_pipeline(Gramian_transform(flatten=True),
pipeline_dict.update({"Gramian Flat SVM":make_pipeline(Gramian_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
SVM_classif())})
pipeline_dict.update({"PAA Recurrence Flat SVM":make_pipeline(Recurrence_transform(flatten=True),
pipeline_dict.update({"Recurrence Flat SVM":make_pipeline(Recurrence_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
SVM_classif())})
pipeline_dict.update({"PAA Gramian Flat KNN":make_pipeline(Gramian_transform(flatten=True),
pipeline_dict.update({"Gramian Flat KNN":make_pipeline(Gramian_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
KNN_classif())})
pipeline_dict.update({"PAA Recurrence Flat KNN":make_pipeline(Recurrence_transform(flatten=True),
pipeline_dict.update({"Recurrence Flat KNN":make_pipeline(Recurrence_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
KNN_classif())})
pipeline_dict.update({"PAA Gramian Flat Ridge":make_pipeline(Gramian_transform(flatten=True),
pipeline_dict.update({"Gramian Flat Ridge":make_pipeline(Gramian_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Ridge_classif())})
pipeline_dict.update({"PAA Recurrence Flat Ridge":make_pipeline(Recurrence_transform(flatten=True),
pipeline_dict.update({"Recurrence Flat Ridge":make_pipeline(Recurrence_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Ridge_classif())})
#TIME SERIES CLASSIFIERS + PAA
pipeline_dict.update({"PAA TSRF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
TimeSeries_Forest())})
pipeline_dict.update({"TSRF":make_pipeline(TimeSeries_Forest())})
pipeline_dict.update({"PAA BOSSVS":make_pipeline(PiecewiseApproximation_transform(output_size=size),
BOSSVS_classif())})
pipeline_dict.update({"PAA KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
KNN_TS_classif())})
pipeline_dict.update({"BOSSVS":make_pipeline(BOSSVS_classif())})
pipeline_dict.update({"PAA RISE":make_pipeline(PiecewiseApproximation_transform(output_size=size),
RISE())})
#TIME SERIES CLASSIFIERS + PAA + SAX
pipeline_dict.update({"KNN":make_pipeline(KNN_TS_classif())})
pipeline_dict.update({"RISE":make_pipeline(RISE())})
#TIME SERIES CLASSIFIERS + SAX
pipeline_dict.update({"PAA SAX TSRF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicAggregate_transform(),
pipeline_dict.update({"SAX TSRF":make_pipeline(SymbolicAggregate_transform(),
TimeSeries_Forest())})
pipeline_dict.update({"PAA SAX BOSSVS":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicAggregate_transform(),
pipeline_dict.update({"SAX BOSSVS":make_pipeline(SymbolicAggregate_transform(),
BOSSVS_classif())})
pipeline_dict.update({"PAA SAX KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicAggregate_transform(),
pipeline_dict.update({"SAX KNN":make_pipeline(SymbolicAggregate_transform(),
KNN_TS_classif())})
pipeline_dict.update({"PAA SAX RISE":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicAggregate_transform(),
pipeline_dict.update({"SAX RISE":make_pipeline(SymbolicAggregate_transform(),
RISE())})
#TIME SERIES CLASSIFIERS + PAA + SFA
pipeline_dict.update({"PAA SFA TSRF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicFourrier_transform(),
#TIME SERIES CLASSIFIERS + SFA
pipeline_dict.update({"SFA TSRF":make_pipeline(SymbolicFourrier_transform(),
TimeSeries_Forest())})
#BOSSVS natively performs SFA on its input, so there is no point in testing it here
pipeline_dict.update({"PAA SFA KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicFourrier_transform(),
pipeline_dict.update({"SFA KNN":make_pipeline(SymbolicFourrier_transform(),
KNN_TS_classif())})
#RISE applies techniques such as the power spectrum and autocorrelation that are meant to be applied in the time domain.
#SFA uses the Fourier transform (DFT) followed by binning with MCB; the result of this operation is no longer in the time domain.
#TIME SERIES CLASSIFIERS + PAA + MATRIX PROFILE
#TIME SERIES CLASSIFIERS + MATRIX PROFILE
pipeline_dict.update({"PAA MP TSRF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
MatrixProfile_transform(),
pipeline_dict.update({"MP TSRF":make_pipeline(MatrixProfile_transform(),
TimeSeries_Forest())})
pipeline_dict.update({"PAA MP BOSSVS":make_pipeline(PiecewiseApproximation_transform(output_size=size),
MatrixProfile_transform(),
pipeline_dict.update({"MP BOSSVS":make_pipeline(MatrixProfile_transform(),
BOSSVS_classif())})
pipeline_dict.update({"PAA MP KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
MatrixProfile_transform(),
pipeline_dict.update({"MP KNN":make_pipeline(MatrixProfile_transform(),
KNN_TS_classif())})
pipeline_dict.update({"PAA MP RISE":make_pipeline(PiecewiseApproximation_transform(output_size=size),
MatrixProfile_transform(),
pipeline_dict.update({"MP RISE":make_pipeline(MatrixProfile_transform(),
RISE())})
#PAA + ROCKET
pipeline_dict.update({"PAA ROCKET RF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
ROCKET_transform(flatten=True),
# ROCKET
pipeline_dict.update({"ROCKET RF":make_pipeline(ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Random_Forest())})
pipeline_dict.update({"PAA ROCKET SVM":make_pipeline(PiecewiseApproximation_transform(output_size=size),
ROCKET_transform(flatten=True),
pipeline_dict.update({"ROCKET SVM":make_pipeline(ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
SVM_classif())})
pipeline_dict.update({"PAA ROCKET KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
ROCKET_transform(flatten=True),
pipeline_dict.update({"ROCKET KNN":make_pipeline(ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
KNN_classif())})
pipeline_dict.update({"PAA ROCKET Ridge":make_pipeline(PiecewiseApproximation_transform(output_size=size),
ROCKET_transform(flatten=True),
pipeline_dict.update({"ROCKET Ridge":make_pipeline(ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Ridge_classif())})
#PAA + MATRIX PROFILE + ROCKET
pipeline_dict.update({"PAA MP ROCKET RF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
MatrixProfile_transform(),
# MATRIX PROFILE + ROCKET
pipeline_dict.update({"MP ROCKET RF":make_pipeline(MatrixProfile_transform(),
ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Random_Forest())})
pipeline_dict.update({"PAA MP ROCKET SVM":make_pipeline(PiecewiseApproximation_transform(output_size=size),
MatrixProfile_transform(),
pipeline_dict.update({"MP ROCKET SVM":make_pipeline(MatrixProfile_transform(),
ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
SVM_classif())})
pipeline_dict.update({"PAA MP ROCKET KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
MatrixProfile_transform(),
pipeline_dict.update({"MP ROCKET KNN":make_pipeline(MatrixProfile_transform(),
ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
KNN_classif())})
pipeline_dict.update({"PAA MP ROCKET Ridge":make_pipeline(PiecewiseApproximation_transform(output_size=size),
MatrixProfile_transform(),
pipeline_dict.update({"MP ROCKET Ridge":make_pipeline(MatrixProfile_transform(),
ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Ridge_classif())})
#PAA + SAX + ROCKET
pipeline_dict.update({"PAA SAX ROCKET RF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicAggregate_transform(),
#SAX + ROCKET
pipeline_dict.update({"SAX ROCKET RF":make_pipeline(SymbolicAggregate_transform(),
ROCKET_transform(flatten=True),
Random_Forest())})
pipeline_dict.update({"PAA SAX ROCKET Ridge":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicAggregate_transform(),
pipeline_dict.update({"SAX ROCKET Ridge":make_pipeline(SymbolicAggregate_transform(),
ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Ridge_classif())})
pipeline_dict.update({"PAA SAX ROCKET SVM":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicAggregate_transform(),
pipeline_dict.update({"SAX ROCKET SVM":make_pipeline(SymbolicAggregate_transform(),
ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
SVM_classif())})
pipeline_dict.update({"PAA SAX ROCKET KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
SymbolicAggregate_transform(),
pipeline_dict.update({"SAX ROCKET KNN":make_pipeline(SymbolicAggregate_transform(),
ROCKET_transform(flatten=True),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
KNN_classif())})
#ROCKET on SFA is not efficient; ROCKET can already extract frequency-based features due to the nature of its convolutional kernels.
#PAA + MP + STACKED FLAT IMAGES
pipeline_dict.update({"PAA MP Gramian + Recurrence RF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
#MP + STACKED FLAT IMAGES
pipeline_dict.update({"MP Gramian + Recurrence RF":make_pipeline(
MatrixProfile_transform(),
FeatureUnion([
("gramian",Gramian_transform(flatten=True)),
("recurrence",Recurrence_transform(flatten=True))
]),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Random_Forest())})
pipeline_dict.update({"PAA MP Gramian + Recurrence SVM":make_pipeline(PiecewiseApproximation_transform(output_size=size),
pipeline_dict.update({"MP Gramian + Recurrence SVM":make_pipeline(
MatrixProfile_transform(),
FeatureUnion([
("gramian",Gramian_transform(flatten=True)),
("recurrence",Recurrence_transform(flatten=True))
]),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
SVM_classif())})
pipeline_dict.update({"PAA MP Gramian + Recurrence KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
pipeline_dict.update({"MP Gramian + Recurrence KNN":make_pipeline(
MatrixProfile_transform(),
FeatureUnion([
("gramian",Gramian_transform(flatten=True)),
("recurrence",Recurrence_transform(flatten=True))
]),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
KNN_classif())})
pipeline_dict.update({"PAA Gramian + Recurrence RF":make_pipeline(PiecewiseApproximation_transform(output_size=size),
pipeline_dict.update({"MP Gramian + Recurrence Ridge":make_pipeline(
MatrixProfile_transform(),
FeatureUnion([
("gramian",Gramian_transform(flatten=True)),
("recurrence",Recurrence_transform(flatten=True))
]),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Ridge_classif())})
pipeline_dict.update({"Gramian + Recurrence RF":make_pipeline(
FeatureUnion([
("gramian",Gramian_transform(flatten=True)),
("recurrence",Recurrence_transform(flatten=True))
]),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Random_Forest())})
#PAA + STACKED FLAT IMAGES
pipeline_dict.update({"PAA Gramian + Recurrence SVM":make_pipeline(PiecewiseApproximation_transform(output_size=size),
pipeline_dict.update({"Gramian + Recurrence SVM":make_pipeline(
FeatureUnion([
("gramian",Gramian_transform(flatten=True)),
("recurrence",Recurrence_transform(flatten=True))
]),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
SVM_classif())})
pipeline_dict.update({"PAA Gramian + Recurrence KNN":make_pipeline(PiecewiseApproximation_transform(output_size=size),
pipeline_dict.update({"Gramian + Recurrence KNN":make_pipeline(
FeatureUnion([
("gramian",Gramian_transform(flatten=True)),
("recurrence",Recurrence_transform(flatten=True))
]),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"), threshold=0.000001),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
KNN_classif())})
"""
pipeline_dict.update({"Gramian + Recurrence Ridge":make_pipeline(
FeatureUnion([
("gramian",Gramian_transform(flatten=True)),
("recurrence",Recurrence_transform(flatten=True))
]),
MinMaxScaler(),
SelectFromModel(ExtraTreesClassifier(n_estimators=300, class_weight="balanced_subsample"),
max_features=max_features, threshold=0.000001),
Ridge_classif())})
#This section is left commented out so you have no trouble running the script without Tensorflow/GPU
#If you get errors during cross validation, you can try to make the ResNetV2 class
# inherit from the tensorflow.keras KerasClassifier wrapper; it can fix some issues.
pipeline_dict.update({"PAA Gramian ResNet50V2":make_pipeline(Gramian_transform(flatten=True),
"""
pipeline_dict.update({"Gramian ResNet50V2":make_pipeline(Gramian_transform(flatten=True),
ResNetV2())})
pipeline_dict.update({"PAA Recurrence ResNet50V2":make_pipeline(Recurrence_transform(flatten=True),
pipeline_dict.update({"Recurrence ResNet50V2":make_pipeline(Recurrence_transform(flatten=True),
ResNetV2())})
pipeline_dict.update({"InceptionTime":make_pipeline(InceptionTime())})
pipeline_dict.update({"MP InceptionTime":make_pipeline(MatrixProfile_transform(),
InceptionTime())})
pipeline_dict.update({"SAX InceptionTime":make_pipeline(SymbolicAggregate_transform(),
InceptionTime())})
"""
......@@ -509,29 +575,39 @@ df_res = pd.DataFrame(columns=['name','representation','balanced accuracy mean',
print('Cross Validation')
order = {0:'R1',1:'R2',2:'R3',3:'R4'}
print("A total of {} runs will be launched".format(len(pipeline_dict)*n_splits*len(order)))
for i_r, dic_func in enumerate([get_R1_dict, get_R2_dict, get_R3_dict, get_R4_dict]):
X = np.asarray([apply_code_dict(x.copy(deep=True),dic_func(codes))['cod_evt'].values for x in life_cycles[:,0] if x is not None],dtype=object)
print("A total of {} runs will be launched".format(len(pipeline_dict)*n_splits*len(order)))
#, get_R2_dict, get_R3_dict, get_R4_dict
for i_r, dic_func in enumerate([get_R1_dict]):
code_dict = dic_func(codes)
if order[i_r] == 'R1':
fill_value = len(list(code_dict.values()))
elif order[i_r] == 'R2':
fill_value = np.max(list(code_dict.values())) + 1000
elif order[i_r] == 'R3':
fill_value = 1000
elif order[i_r] == 'R4':
fill_value = code_dict['-1']
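#Example (hypothetical, assuming R1 maps codes to ordinals 0..n-1): with 30 distinct event codes,
#fill_value = 30 for R1, a value not assigned to any real code, so filled samples stay separable.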
X = [apply_code_dict(x[0].copy(deep=True), code_dict).resample(resample_freq,on='date',convention='end',origin='start_day').mean().fillna(fill_value) for x in life_cycles if x is not None]
y = np.asarray([x[1] for x in life_cycles if x is not None]).astype(int)
idx = np.where([x.shape[0]>=size for x in X])[0]
X = [X[i] for i in idx]
y = y[idx]
if TSCHIEF_path is not None:
skf = StratifiedKFold(n_splits=n_splits)
paa = PiecewiseApproximation_transform(size)
X_paa = paa.transform(X)
y_paa = y
X = np.asarray([x.reindex(pd.date_range(start=x.index[-1].date()-timedelta(days=n_days),
end=x.index[-1].date(), freq=resample_freq)).fillna(fill_value).values for x in X])
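#Resulting shape (with the defaults above): pd.date_range over 21 days at '20T' yields
#21*24*3 + 1 = 1513 timestamps per series, matching the `size` used by the TS-CHIEF bash script;
#timestamps absent from the resampled cycle are filled with fill_value.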
print(X.shape)
print(np.bincount(y))
df = pd.DataFrame(data = {i: x.reshape(-1) for i,x in enumerate(X_paa)}).transpose()
df[size]=y_paa
if TSCHIEF_path is not None:
skf = StratifiedKFold(n_splits=n_splits)
df = pd.DataFrame(data = {i: x.reshape(-1) for i,x in enumerate(X)}).transpose()
df[X.shape[1]]=y
df = df.astype(np.float32)
i_split=0
for train_idx, test_idx in skf.split(X,y):
df.loc[train_idx].to_csv(TSCHIEF_path+'data_Train_{}_{}_{}.csv'.format(size, i_split, order[i_r]),index=False,header=False)
df.loc[test_idx].to_csv(TSCHIEF_path+'data_Test_{}_{}_{}.csv'.format(size, i_split, order[i_r]),index=False,header=False)
df.loc[train_idx].to_csv(TSCHIEF_path+'data_Train_{}_{}_{}.csv'.format(X.shape[1], i_split, order[i_r]),index=False,header=False)
df.loc[test_idx].to_csv(TSCHIEF_path+'data_Test_{}_{}_{}.csv'.format(X.shape[1], i_split, order[i_r]),index=False,header=False)
i_split+=1
......@@ -574,16 +650,15 @@ for i_r, dic_func in enumerate([get_R1_dict, get_R2_dict, get_R3_dict, get_R4_di
'Score time std':[np.std(cv['score_time'])]})
])
# In[15]:
df_res.to_csv(result_path+'cv_results.csv',sep=csv_separator, index=False)
df_res.to_csv(result_path+'cv_results'+str(n_days)+'J_'+resample_freq+'.csv',sep=csv_separator, index=False)
if produce_latex is not None:
df_dict = {'name':df_res['name'].unique()}
for col in ['balanced accuracy','CFI','F1 score','Fit time','Score time']:
for r in ['R1', 'R2', 'R3','R4']:
df_dict.update({col+' '+r:(df_res[df_res['representation']==r][col + ' mean'].astype(str).str[0:5] + '(+/- '+df_res[df_res['representation']==r][col+' std'].astype(str).str[0:5]+')').reset_index(drop=True)})
df_latex = pd.DataFrame(df_dict)
df_latex.to_csv(result_path+'cv_results_latex.csv',sep=csv_separator, index=False)
df_latex.to_csv(result_path+'cv_results'+str(n_days)+'J_'+resample_freq+'_latex.csv',sep=csv_separator, index=False)
latex_str = df_latex.sort_values(by=['CFI R3'],ascending=True).to_latex(index=False)
with open(produce_latex, 'w') as f:
f.write(latex_str)
......
......@@ -34,6 +34,12 @@ Configuration parameters are located at the beginning of CV_script, you MUST cha
To change or check the algorithms' parameters: they are all redefined in custom wrapper classes to avoid errors; if a parameter is not specified in the constructor, it is left at its default value.
The representation methods are defined inside utils.representations and the classification methods inside utils.classifications.
To change the parameters of TS-CHIEF, you can change the values of the following arguments in the ts-chief script:
```bash
-trees="300" -s="ee:4,boss:50,rise:50"
```
If you want to give this algorithm more predictive power, increase the number of trees and the number of random splits generated by each method (boss, rise, ...). We used these values to avoid memory errors; the shorter the input time series, the higher those values can be without causing trouble.
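For reference, the heavier configuration previously used by the script (more predictive power, but also more memory-hungry on long series) was:

```bash
-trees="500" -s="ee:10,boss:150,rise:150"
```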
## Usage
Extract the files of the dataset archive located in ~/datasets into the dataset folder
......@@ -74,8 +80,6 @@ by
from sktime.utils.data_container import tabularize, from_3d_numpy_to_nested
```
* We also modified InceptionTime to use binary_crossentropy (change the loss name and use a sigmoid layer with 1 neuron as output) and weighted accuracy for early stopping. This is not mandatory but is better suited to our problem.
## Contributing
If any bug should occur, please open an issue so we can work on a fix!
......
#!/bin/bash
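# Fold indices run 0..n_cv (10 cross-validation splits); representations run R1..R4 (n_r).
# size must match the number of timestamps per exported series, as used in the CSV file names.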
n_cv=9
n_r=4
size=1000
size=1513
for id_r in `seq 1 $n_r`
do
for id_cv in `seq 0 $n_cv`
do
jdk/jdk-15/bin/java -jar tschief.jar -train="datasets/TSCHIEF/data_Train_"$size"_"$id_cv"_R"$id_r".csv" -test="datasets/TSCHIEF/data_Test_"$size"_"$id_cv"_R"$id_r".csv" -out="results/TSCHIEF/" -repeats="1" -trees="500" -s="ee:10,boss:150,rise:150" -export="1" -verbosity="1" -shuffle="True" -target_column="last"
jdk/jdk-15/bin/java -Xms6G -Xmx12G -jar tschief.jar -train="datasets/TSCHIEF/data_Train_"$size"_"$id_cv"_R"$id_r".csv" -test="datasets/TSCHIEF/data_Test_"$size"_"$id_cv"_R"$id_r".csv" -out="results/TSCHIEF/" -repeats="1" -trees="300" -s="ee:4,boss:50,rise:50" -export="1" -verbosity="1" -shuffle="True" -target_column="last"
done
done
......@@ -9,7 +9,7 @@ from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import RidgeClassifier
from sktime.classification.interval_based import TimeSeriesForest
from sktime.utils.data_container import concat_nested_arrays as cna
from sktime.utils.data_container import _concat_nested_arrays as cna
from sktime.classification.frequency_based import RandomIntervalSpectralForest
from sklearn.base import BaseEstimator, ClassifierMixin
......@@ -58,14 +58,13 @@ class ResNetV2(BaseEstimator, ClassifierMixin):
classes=1,
classifier_activation="sigmoid",
)
model.compile(optimizer=self.optimizer, loss=self.loss, weighted_metrics=['accuracy'])
model.compile(optimizer=self.optimizer, loss=self.loss, metrics=['accuracy'])
self.model = model
def fit(self, X, y, epochs=1000, batch_size=32, return_hist=False, el_patience=70, verbose=0, val_size=0.1):
def fit(self, X, y, epochs=1500, batch_size=32, return_hist=False, el_patience=100, verbose=0, val_size=0.1):
self.init_model((X.shape[1], X.shape[2], X.shape[3]))
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=val_size)
el = EarlyStopping(monitor='val_accuracy', patience=el_patience, restore_best_weights=True, mode='max')
cw = compute_class_weight('balanced', np.unique(y_train), y_train)
el = EarlyStopping(monitor='val_loss', patience=el_patience, restore_best_weights=True, mode='min')
self.model.fit(
X_train, y_train,
......@@ -74,8 +73,7 @@ class ResNetV2(BaseEstimator, ClassifierMixin):
batch_size=batch_size,
verbose=verbose,
callbacks=[el],
shuffle=True,
class_weight={0:cw[0],1:cw[1]}
shuffle=True
)
return self
......@@ -85,7 +83,7 @@ class ResNetV2(BaseEstimator, ClassifierMixin):
def predict_proba(self,X):
return self.model.predict(X)
#Depending on your sktime_dl version, this might throw import errors; see the Readme for a fix.
from sktime_dl.deeplearning.inceptiontime._classifier import InceptionTimeClassifier
......@@ -100,8 +98,8 @@ class InceptionTime(BaseEstimator, ClassifierMixin):
el_patience=100, verbose=False, val_size=0.1):
self.model = InceptionTimeClassifier(verbose=verbose, depth=self.depth,
nb_filters=self.nb_filters, bottleneck_size=self.bottleneck_size,
callbacks=[EarlyStopping(monitor='val_accuracy', patience=el_patience,
restore_best_weights=True, mode='max')])
callbacks=[EarlyStopping(monitor='val_loss', patience=el_patience,
restore_best_weights=True, mode='min')])
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=val_size)
self.model.fit(X_train, y_train, validation_X=X_val,validation_y=y_val)
......@@ -141,7 +139,8 @@ class RISE(BaseEstimator, ClassifierMixin, SktimeEstimator):
def fit(self,X,y):
X, y = self._sktime_format(X,y)
self.estimator = RandomIntervalSpectralForest(n_estimators=self.n_estimators, min_interval=self.min_length)
self.estimator = RandomIntervalSpectralForest(n_estimators=self.n_estimators,
min_interval=self.min_length)
self.estimator.fit(X,y)
return self
......@@ -155,11 +154,10 @@ class RISE(BaseEstimator, ClassifierMixin, SktimeEstimator):
class Random_Forest(BaseEstimator, ClassifierMixin):
def __init__(self, n_estimators=300, max_depth=None, max_features=0.75, max_samples=0.75,
def __init__(self, n_estimators=300, max_depth=None, max_samples=0.75,
ccp_alpha=0.0225, class_weight="balanced_subsample"):
self.n_estimators=n_estimators
self.max_depth=max_depth
self.max_features=max_features
self.max_samples=max_samples
self.ccp_alpha=ccp_alpha
self.class_weight=class_weight
......@@ -167,9 +165,11 @@ class Random_Forest(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
X = np.asarray([x.astype(np.float32) for x in X])
self.estimator = RandomForestClassifier(n_estimators=self.n_estimators, max_depth=self.max_depth,
max_features=self.max_features, max_samples=self.max_samples,
ccp_alpha=self.ccp_alpha,class_weight=self.class_weight)
self.estimator = RandomForestClassifier(n_estimators=self.n_estimators,
max_depth=self.max_depth,
max_samples=self.max_samples,
ccp_alpha=self.ccp_alpha,
class_weight=self.class_weight)
self.estimator.fit(X,y)
return self
......@@ -183,14 +183,15 @@ class Random_Forest(BaseEstimator, ClassifierMixin):
return self.estimator.predict_proba(X)
class KNN_classif(BaseEstimator, ClassifierMixin):
def __init__(self, n_neighbors=9, weights='distance',p=2):
def __init__(self, n_neighbors=7, weights='distance',p=2):
self.n_neighbors = n_neighbors
self.weights = weights
self.p = p
self.estimator = None
def fit(self,X,y):
self.estimator = KNeighborsClassifier(n_neighbors=self.n_neighbors, weights=self.weights, p=self.p)
self.estimator = KNeighborsClassifier(n_neighbors=self.n_neighbors,
weights=self.weights, p=self.p)
self.estimator.fit(X,y)
return self
......@@ -201,7 +202,7 @@ class KNN_classif(BaseEstimator, ClassifierMixin):
return self.estimator.predict_proba(X)
class TimeSeries_Forest(BaseEstimator, ClassifierMixin, SktimeEstimator):
def __init__(self, n_estimators=300, min_interval=3):
def __init__(self, n_estimators=300, min_interval=5):
self.n_estimators = n_estimators
self.min_interval = min_interval
self.estimator = None
......@@ -228,7 +229,7 @@ class SVM_classif(BaseEstimator, ClassifierMixin):
cache_size=500, class_weight='balanced'):
self.C = C
self.kernel = kernel
self.degree = degree
self.degree = degree #Not used with RBF
self.gamma = gamma
self.cache_size = cache_size
self.class_weight = class_weight
......@@ -275,7 +276,7 @@ class Ridge_classif(BaseEstimator, ClassifierMixin):
return self.estimator.predict_proba(X)
class KNN_TS_classif(BaseEstimator, ClassifierMixin, PytsEstimator):
def __init__(self, n_neighbors=9, weights='distance', p=2):
def __init__(self, n_neighbors=7, weights='distance', p=2):
self.n_neighbors = n_neighbors
self.weights = weights
self.p = p
......@@ -296,11 +297,10 @@ class KNN_TS_classif(BaseEstimator, ClassifierMixin, PytsEstimator):
X = self._format_X(X)
return self.estimator.predict_proba(X)
class BOSSVS_classif(BaseEstimator, ClassifierMixin, PytsEstimator):
def __init__(self, word_size=9, n_bins=7, window_size=0.2, window_step=1,
def __init__(self, word_size=5, n_bins=5, window_size=0.15, window_step=0.01,
anova=True, drop_sum=False, norm_mean=False, norm_std=False,
strategy='uniform', alphabet=None):
strategy='quantile', alphabet=None,smooth_idf=True):
self.word_size = word_size
self.n_bins = n_bins
self.window_size = window_size
......@@ -311,6 +311,7 @@ class BOSSVS_classif(BaseEstimator, ClassifierMixin, PytsEstimator):
self.norm_std = norm_std
self.strategy = strategy
self.alphabet = alphabet
self.smooth_idf = smooth_idf
self.estimator = None
def fit(self,X,y):
......@@ -319,7 +320,8 @@ class BOSSVS_classif(BaseEstimator, ClassifierMixin, PytsEstimator):
window_size=self.window_size, window_step=self.window_step,
anova=self.anova, drop_sum=self.drop_sum,
norm_mean=self.norm_mean, norm_std=self.norm_std,
strategy=self.strategy, alphabet=self.alphabet)
strategy=self.strategy, alphabet=self.alphabet,
smooth_idf=self.smooth_idf)
self.estimator.fit(X,y)
return self
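#Sketch (assuming pyts interprets float window_size/window_step as fractions of the series
#length): with 1513-point series, window_size=0.15 gives windows of ~227 samples and
#window_step=0.01 gives a stride of ~15 samples between consecutive windows.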
......
......@@ -109,7 +109,7 @@ class PiecewiseApproximation_transform(BaseEstimator, TransformerMixin):
return self
class SymbolicAggregate_transform(BaseEstimator, TransformerMixin):
def __init__(self, n_bins=7, strategy='uniform', alphabet='ordinal'):
def __init__(self, n_bins=5, strategy='uniform', alphabet='ordinal'):
self.n_bins = n_bins
self.strategy = strategy
self.alphabet = alphabet
......@@ -128,8 +128,8 @@ class SymbolicAggregate_transform(BaseEstimator, TransformerMixin):
return self
class SymbolicFourrier_transform(BaseEstimator, TransformerMixin):
def __init__(self, n_coefs=20, n_bins=7, strategy='uniform', drop_sum=False,
anova=True, norm_mean=True, norm_std=False, alphabet='ordinal'):
def __init__(self, n_coefs=10, n_bins=5, strategy='uniform', drop_sum=True,
anova=True, norm_mean=False, norm_std=False, alphabet='ordinal'):
self.n_coefs = n_coefs
self.n_bins = n_bins
self.strategy = strategy
......@@ -156,7 +156,7 @@ class SymbolicFourrier_transform(BaseEstimator, TransformerMixin):
class MatrixProfile_transform():
def __init__(self, window_size=0.075):
def __init__(self, window_size=0.15):
self.window_size = window_size
def transform(self, X, y=None):
......@@ -170,7 +170,7 @@ class MatrixProfile_transform():
return self
class ROCKET_transform(BaseEstimator, TransformerMixin):
def __init__(self, n_kernels=15000, kernel_sizes=(5,7,9), flatten=False):
def __init__(self, n_kernels=20000, kernel_sizes=(5,7,9,11), flatten=False):
self.flatten = flatten
self.n_kernels = n_kernels
self.kernel_sizes = kernel_sizes
......