在本文中,我们将介绍四个主要的Python库——statsmodels、tslearn、tssearch、tsfresh——每个库都针对时间序列分析的不同方面进行了定制。这些库为从预测到模式识别的任务提供了强大的工具,使它们成为各种应用程序的宝贵资源。
我们使用来自Kaggle的数据集,通过加速度计数据对各种身体活动进行分析。这些活动被分为12个不同的类别,每个类别对应一个特定的身体动作,如站立、坐着、行走,或从事更有活力的活动,如慢跑和骑自行车。每个活动都记录了一分钟的持续时间,提供了丰富的时间序列数据源。
用于此分析的库有:
# statsmodels
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf
#tslearn
from tslearn.barycenters import dtw_barycenter_averaging
# tssearch
from tssearch import get_distance_dict, time_series_segmentation, time_series_search, plot_search_distance_result
# tsfresh
from tsfresh import extract_features
from tsfresh.feature_selection.relevance import calculate_relevance_table
from tsfresh.feature_extraction import EfficientFCParameters
from tsfresh.utilities.dataframe_functions import impute
Statsmodels
def activity_stationary_test(dataframe, sensor, activity):
    """Run an Augmented Dickey-Fuller stationarity test on one sensor/activity slice.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Must contain an 'Activity' column and a column named `sensor`.
    sensor : str
        Column holding the accelerometer signal to test.
    activity : object
        Activity label used to filter the rows.

    Prints the ADF test statistic, p-value and 1% critical value, followed by
    a stationary / non-stationary verdict.
    """
    # reset_index returns a NEW frame — the original call discarded the result,
    # making it a no-op; keep the returned frame instead.
    dataframe = dataframe.reset_index(drop=True)
    adft = adfuller(dataframe[dataframe['Activity'] == activity][sensor], autolag='AIC')
    output_df = pd.DataFrame({'Values': [adft[0], adft[1], adft[4]['1%']],
                              'Metric': ['Test Statistics', 'p-value', 'critical value (1%)']})
    print('Statistics of {} sensor:\n'.format(sensor), output_df)
    print()
    # Stationary when the p-value is significant AND the test statistic is
    # below the 1% critical value; use boolean `and`, not bitwise `&`.
    if adft[1] < 0.05 and adft[0] < adft[4]['1%']:
        print('The signal is stationary')
    else:
        print('The signal is non-stationary')
def activity_decomposition(dataframe, sensor, activity):
    """Plot the additive seasonal decomposition of one sensor/activity slice.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Must contain an 'Activity' column and a column named `sensor`.
    sensor : str
        Column holding the accelerometer signal to decompose.
    activity : object
        Activity label used to filter the rows.

    Shows a 4-panel figure (observed / trend / seasonal / residual).
    """
    # reset_index returns a NEW frame — the original call discarded the result,
    # making it a no-op; keep the returned frame instead.
    dataframe = dataframe.reset_index(drop=True)
    data = dataframe[dataframe['Activity'] == activity][sensor]
    # period=50 is a dataset-specific choice — presumably tied to the
    # accelerometer sampling rate; TODO confirm against the data source.
    # extrapolate_trend='freq' avoids NaN edges in the trend component.
    decompose = seasonal_decompose(data, model='additive', extrapolate_trend='freq', period=50)
    fig = decompose.plot()
    fig.set_size_inches((12, 7))
    fig.axes[0].set_title('Seasonal Decomposition Plot')
    fig.axes[3].set_xlabel('Indices')  # bottom panel (residuals) carries the x label
    plt.show()
Tslearn
# Build a reusable DTW template from the raw signal:
# slide a 150-sample window across the signal every 50 samples, then average
# the windows under DTW alignment (barycenter) into a single 1-D template.
template_length = 150
overlap = 50  # step between consecutive window starts; adjust as needed
last_start = len(signal) - template_length
segments = [signal[start:start + template_length]
            for start in range(0, last_start + 1, overlap)]
template_signal = dtw_barycenter_averaging(segments).flatten()
Tssearch
# Segment the full signal against the averaged template using DTW, then plot
# each distance's segmentation boundaries over the raw signal.
segment_distance = get_distance_dict(["Dynamic Time Warping"])
segment_results = time_series_segmentation(segment_distance, template_signal, signal_np)
# Vertical markers span slightly beyond the signal range for visibility;
# the bounds are loop-invariant, so compute them once.
y_lo = np.min(signal_np) - 1
y_hi = np.max(signal_np) + 1
for distance_name, boundaries in segment_results.items():
    plt.figure(figsize=(15, 3))
    plt.plot(signal_np, color='gray')
    plt.vlines(boundaries, y_lo, y_hi, 'C1')
    plt.xlabel('Indices')
    plt.ylabel('Amplitude')
    plt.title(distance_name)
# Distance configurations for tssearch: one elastic measure (sub-sequence DTW)
# and one lock-step measure (plain Euclidean). Built from named sub-configs
# so each measure's parameters are easy to tweak in isolation.
_dtw_config = {
    "function": "dtw",
    "parameters": {"dtw_type": "sub-dtw", "alpha": 0.5},
}
_euclidean_config = {
    "function": "euclidean_distance",
    "parameters": "",
}
dict_distances = {
    "elastic": {"Dynamic Time Warping": _dtw_config},
    "lockstep": {"Euclidean Distance": _euclidean_config},
}
# Search the signal for the best subsequence matches to the template under the
# configured distances; output=("number", 30) keeps the 30 best matches.
result = time_series_search(dict_distances, template_signal, signal_np, output=("number", 30))
# Visualize where the matches fall and their distance profile along the signal.
plot_search_distance_result(result, signal_np)
Tsfresh
# Feature extraction + selection with tsfresh:
# extract a wide feature matrix per activity, rank features by relevance to the
# class label, and keep the 10 most relevant ones.
extraction_settings = EfficientFCParameters()
X_extracted = extract_features(final_df, column_id='Activity',
                               default_fc_parameters=extraction_settings,
                               # impute removes all NaN features automatically
                               impute_function=impute, show_warnings=False)
# NOTE: extract_features already returns a DataFrame; the original re-wrapped it
# with its own index/columns (a no-op) — that line has been removed.
# One integer class label per activity id — presumably ids 1..12 match the 12
# activity categories; TODO confirm against final_df's 'Activity' values.
values = list(range(1, 13))
y = pd.Series(values, index=range(1, 13))
# Rank every extracted feature by univariate relevance to the target,
# most significant (smallest p-value) first.
relevance_table_clf = calculate_relevance_table(X_extracted, y)
relevance_table_clf.sort_values("p_value", inplace=True)
# (A bare `relevance_table_clf.head(10)` display expression from the notebook
# was dropped — it has no effect in a script.)
top_features = relevance_table_clf["feature"].head(10)
x_features = X_extracted[top_features]
总结

