Time Series Forecasting - Box-Jekins Methon (ARIMA) in Python and

Time Series Forecasting - FBProphet in Python

Belgium Inflation - Consumer Price Annual Percentage Forecasting

In [3]:
# ignore warnings
import pandas as pd
import warnings
warnings.filterwarnings("ignore")

Load dataset

In [4]:
# load dataset
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot

filename = 'Belgium_Inflation_ConsumerPriceData_Annual_percentages.csv'

df = read_csv(filename)
df = df.set_index('Year')

df.plot(figsize = (8,6))

fig = pyplot.figure(figsize = (8,6))
autocorrelation_plot(df)
pyplot.show()

print(df.head(5))
      Inflation_ConsumerPrice_Annual_Percentage
Year                                           
1960                                   0.299467
1961                                   0.992676
1962                                   1.404607
1963                                   2.148003
1964                                   4.168761

Autocorrelation and Partial Autocorrelation in Python

In [5]:
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt

plt.rcParams.update({'figure.figsize':(6,4), 'figure.dpi':120})

plot_acf(df)
pyplot.show()
In [6]:
from statsmodels.graphics.tsaplots import plot_pacf

plot_pacf(df)
pyplot.show()

ADF test

In [7]:
from statsmodels.tsa.stattools import adfuller

# ADF Test
def adf_test(series):
    result = adfuller(series, autolag='AIC')
    print(); print(f'ADF Statistic: {result[0]}')
    print();  print(f'n_lags: {result[1]}')
    print();  print(f'p-value: {result[1]}')

    print(); print('Critial Values:')
    for key, value in result[4].items():
        print(f'   {key}, {value}')   

adf_test(df["Inflation_ConsumerPrice_Annual_Percentage"])
ADF Statistic: -2.6678791362493506

n_lags: 0.07976340748206456

p-value: 0.07976340748206456

Critial Values:
   1%, -3.5463945337644063
   5%, -2.911939409384601
   10%, -2.5936515282964665

KPSS Test

In [8]:
from statsmodels.tsa.stattools import kpss

def kpss_test(series, **kw):    
    
    statistic, p_value, n_lags, critical_values = kpss(series, **kw)
    
    # Format Output
    print(); print(f'KPSS Statistic: {statistic}')
    print(); print(f'p-value: {p_value}')
    print(); print(f'num lags: {n_lags}')
    print(); print('Critial Values:')
    for key, value in critical_values.items():
        print(f'   {key} : {value}')
    
kpss_test(df["Inflation_ConsumerPrice_Annual_Percentage"])
KPSS Statistic: 0.27963499498882644

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739
/home/crown/miniconda/envs/nilimeshdss/lib/python3.6/site-packages/statsmodels/tsa/stattools.py:1278: InterpolationWarning: p-value is greater than the indicated p-value
  warn("p-value is greater than the indicated p-value", InterpolationWarning)

How to find the order of differencing (d) in ARIMA model

In [9]:
import numpy as np, pandas as pd
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.figsize':(12,14), 'figure.dpi':120})

# Import data
#df = pd.read_csv('shampoo.csv', header=0, names = ['Sales'])
df.reset_index(drop=True, inplace=True)

# Original Series
fig, axes = plt.subplots(5, 2, sharex=True)
axes[0, 0].plot(df.values); axes[0, 0].set_title('Original Series')
plot_acf(df.values, ax=axes[0, 1])

# 1st Differencing
df1 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff()
axes[1, 0].plot(df1); axes[1, 0].set_title('1st Order Differencing')
plot_acf(df1.dropna(), ax=axes[1, 1])

# 2nd Differencing
df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff()
axes[2, 0].plot(df2); axes[2, 0].set_title('2nd Order Differencing')
plot_acf(df2.dropna(), ax=axes[2, 1])

# 3rd Differencing
df3 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff()
axes[3, 0].plot(df3); axes[3, 0].set_title('3rd Order Differencing')
plot_acf(df3.dropna(), ax=axes[3, 1])

# 3rd Differencing
df4 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff().diff()
axes[4, 0].plot(df4); axes[4, 0].set_title('4th Order Differencing')
plot_acf(df4.dropna(), ax=axes[4, 1])

plt.show()

ADF and KPSS statistics

In [10]:
warnings.filterwarnings("ignore")

print("---------------------------------------------")
print("First Diffencing: ")
print("---------------------------------------------")
df1 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff()
print(); print("---------------------------------------------")
adf_test(df1.dropna())
print(); print("---------------------------------------------")
kpss_test(df1.dropna())
print(); print("---------------------------------------------")


print(); print("---------------------------------------------")
print("2nd Diffencing: ")
print("---------------------------------------------")
df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff()
print(); print("---------------------------------------------")
adf_test(df2.dropna())
print(); print("---------------------------------------------")
kpss_test(df2.dropna())
print(); print("---------------------------------------------")

print(); print("---------------------------------------------")
print("3rd Diffencing: ")
print("---------------------------------------------")
df3 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff()
print(); print("---------------------------------------------")
adf_test(df3.dropna())
print(); print("---------------------------------------------")
kpss_test(df3.dropna())
print(); print("---------------------------------------------")

print(); print("---------------------------------------------")
print("4th Diffencing: ")
print("---------------------------------------------")
df4 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff().diff()
print(); print("---------------------------------------------")
adf_test(df4.dropna())
print(); print("---------------------------------------------")
kpss_test(df4.dropna())
print(); print("---------------------------------------------")
---------------------------------------------
First Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.722812582451241

n_lags: 6.878770916878362e-07

p-value: 6.878770916878362e-07

Critial Values:
   1%, -3.552928203580539
   5%, -2.9147306250000002
   10%, -2.595137155612245

---------------------------------------------

KPSS Statistic: 0.18514743744415924

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
2nd Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.007880040934822

n_lags: 2.1409852571309972e-05

p-value: 2.1409852571309972e-05

Critial Values:
   1%, -3.5714715250448363
   5%, -2.922629480573571
   10%, -2.5993358475635153

---------------------------------------------

KPSS Statistic: 0.08668010572575001

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
3rd Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.728040200579978

n_lags: 6.699960913156014e-07

p-value: 6.699960913156014e-07

Critial Values:
   1%, -3.5778480370438146
   5%, -2.925338105429433
   10%, -2.6007735310095064

---------------------------------------------

KPSS Statistic: 0.09249684003803695

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
4th Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.8064770572160995

n_lags: 4.5041851675640037e-07

p-value: 4.5041851675640037e-07

Critial Values:
   1%, -3.584828853223594
   5%, -2.9282991495198907
   10%, -2.6023438271604937

---------------------------------------------

KPSS Statistic: 0.09870246028870237

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------
In [ ]:
 

How to find the order of the AR term (p)

In [11]:
from statsmodels.graphics.tsaplots import plot_pacf

# PACF plot 
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)

df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff() #.diff() #.diff()

axes[0].plot(df2); axes[0].set_title('2nd Differencing')
axes[1].set(ylim=(-3,3))
plot_pacf(df2.dropna(), ax=axes[1]) #PACF

plt.show()

How to find the order of the MA term (q)

In [12]:
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt

plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)

df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff() #.diff() #.diff()
axes[0].plot(df2); axes[0].set_title('2nd Differencing')
#axes[1].set(ylim=(0,1.2))
plot_acf(df2.dropna(), ax=axes[1]) # ACF

plt.show()
In [13]:
## ADF test
adf_test(df2.dropna())
ADF Statistic: -5.007880040934822

n_lags: 2.1409852571309972e-05

p-value: 2.1409852571309972e-05

Critial Values:
   1%, -3.5714715250448363
   5%, -2.922629480573571
   10%, -2.5993358475635153

Build the ARIMA(p,d,q) Model

In [14]:
from statsmodels.tsa.arima_model import ARIMA

plt.rcParams.update({'figure.figsize':(16,6), 'figure.dpi':220})

df = read_csv(filename)
df = df.set_index('Year')

# ARIMA Model
model = ARIMA(df["Inflation_ConsumerPrice_Annual_Percentage"], order=(2,2,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())

# Plot residual errors
residuals = pd.DataFrame(model_fit.resid)
fig, ax = plt.subplots(1,2)
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind='kde', title='Density', ax=ax[1])
plt.show()

# Actual vs Fitted
model_fit.plot_predict(dynamic=False)
plt.show()
                                          ARIMA Model Results                                           
========================================================================================================
Dep. Variable:     D2.Inflation_ConsumerPrice_Annual_Percentage   No. Observations:                   59
Model:                                           ARIMA(2, 2, 0)   Log Likelihood                -120.839
Method:                                                 css-mle   S.D. of innovations              1.872
Date:                                          Fri, 30 Jul 2021   AIC                            249.677
Time:                                                  15:54:25   BIC                            257.987
Sample:                                                       2   HQIC                           252.921
                                                                                                        
======================================================================================================================
                                                         coef    std err          z      P>|z|      [0.025      0.975]
----------------------------------------------------------------------------------------------------------------------
const                                                 -0.0209      0.143     -0.146      0.885      -0.302       0.260
ar.L1.D2.Inflation_ConsumerPrice_Annual_Percentage    -0.4690      0.125     -3.752      0.000      -0.714      -0.224
ar.L2.D2.Inflation_ConsumerPrice_Annual_Percentage    -0.2477      0.124     -2.003      0.050      -0.490      -0.005
                                    Roots                                    
=============================================================================
                  Real          Imaginary           Modulus         Frequency
-----------------------------------------------------------------------------
AR.1           -0.9469           -1.7723j            2.0094           -0.3281
AR.2           -0.9469           +1.7723j            2.0094            0.3281
-----------------------------------------------------------------------------
In [ ]:
 

using Auto ARIMA

In [15]:
import pmdarima as pm

model_with_auto_d = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, # maximum p, q
                      m=1,              # frequency of series
                      
                      d=None,           # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_auto_d.summary())
Fit ARIMA: order=(1, 1, 1); AIC=230.829, BIC=239.206, Fit time=0.044 seconds
Fit ARIMA: order=(0, 1, 0); AIC=228.079, BIC=232.268, Fit time=0.002 seconds
Fit ARIMA: order=(1, 1, 0); AIC=229.242, BIC=235.525, Fit time=0.012 seconds
Fit ARIMA: order=(0, 1, 1); AIC=229.073, BIC=235.356, Fit time=0.013 seconds
Total fit time: 0.084 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                    D.y   No. Observations:                   60
Model:                 ARIMA(0, 1, 0)   Log Likelihood                -112.040
Method:                           css   S.D. of innovations              1.566
Date:                Fri, 30 Jul 2021   AIC                            228.079
Time:                        15:54:27   BIC                            232.268
Sample:                             1   HQIC                           229.718
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const          0.0074      0.202      0.036      0.971      -0.389       0.404
==============================================================================
In [16]:
model_with_d_equals_1 = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, max_d=4, # maximum p, q and d
                      m=1,              # frequency of series
                      
                      d=1,              # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_d_equals_1.summary())
Fit ARIMA: order=(1, 1, 1); AIC=230.829, BIC=239.206, Fit time=0.043 seconds
Fit ARIMA: order=(0, 1, 0); AIC=228.079, BIC=232.268, Fit time=0.002 seconds
Fit ARIMA: order=(1, 1, 0); AIC=229.242, BIC=235.525, Fit time=0.012 seconds
Fit ARIMA: order=(0, 1, 1); AIC=229.073, BIC=235.356, Fit time=0.013 seconds
Total fit time: 0.070 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                    D.y   No. Observations:                   60
Model:                 ARIMA(0, 1, 0)   Log Likelihood                -112.040
Method:                           css   S.D. of innovations              1.566
Date:                Fri, 30 Jul 2021   AIC                            228.079
Time:                        15:54:27   BIC                            232.268
Sample:                             1   HQIC                           229.718
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const          0.0074      0.202      0.036      0.971      -0.389       0.404
==============================================================================
In [17]:
model_with_d_equals_2 = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, # maximum p, q
                      m=1,              # frequency of series
                      
                      d=2,              # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_d_equals_2.summary())
Fit ARIMA: order=(1, 2, 1); AIC=231.777, BIC=240.087, Fit time=0.060 seconds
Fit ARIMA: order=(0, 2, 0); AIC=258.573, BIC=262.728, Fit time=0.002 seconds
Fit ARIMA: order=(1, 2, 0); AIC=251.538, BIC=257.771, Fit time=0.013 seconds
Fit ARIMA: order=(0, 2, 1); AIC=230.733, BIC=236.965, Fit time=0.023 seconds
Fit ARIMA: order=(0, 2, 2); AIC=231.609, BIC=239.919, Fit time=0.068 seconds
Fit ARIMA: order=(1, 2, 2); AIC=233.423, BIC=243.811, Fit time=0.155 seconds
Total fit time: 0.323 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                   D2.y   No. Observations:                   59
Model:                 ARIMA(0, 2, 1)   Log Likelihood                -112.366
Method:                       css-mle   S.D. of innovations              1.570
Date:                Fri, 30 Jul 2021   AIC                            230.733
Time:                        15:54:27   BIC                            236.965
Sample:                             2   HQIC                           233.166
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0098      0.012     -0.838      0.406      -0.033       0.013
ma.L1.D2.y    -1.0000      0.049    -20.302      0.000      -1.097      -0.903
                                    Roots                                    
=============================================================================
                  Real          Imaginary           Modulus         Frequency
-----------------------------------------------------------------------------
MA.1            1.0000           +0.0000j            1.0000            0.0000
-----------------------------------------------------------------------------

How to interpret the residual plots in ARIMA model

In [18]:
model_with_auto_d.plot_diagnostics(figsize=(12,10))
plt.show()
In [19]:
model_with_d_equals_1.plot_diagnostics(figsize=(12,10))
plt.show()
In [20]:
model_with_d_equals_2.plot_diagnostics(figsize=(12,10))
plt.show()

Forecast

In [21]:
model = model_with_auto_d
In [22]:
# Forecast
n_periods = 10
fc, confint = model.predict(n_periods=n_periods, return_conf_int=True)
#index_of_fc = np.arange(len(df), len(df)+n_periods)
index_of_fc = np.arange(2020, 2020+n_periods)

# make series for plotting purpose
fc_series = pd.Series(fc, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)

# Plot
plt.plot(df)
plt.plot(fc_series, color='darkgreen')
plt.fill_between(lower_series.index, 
                 lower_series, 
                 upper_series, 
                 color='k', alpha=.15)

plt.title("Final Forecast")
plt.show()

print(); print(fc_series)
print(); print(lower_series)
print(); print(upper_series)
2020    0.748147
2021    0.755503
2022    0.762858
2023    0.770213
2024    0.777569
2025    0.784924
2026    0.792280
2027    0.799635
2028    0.806990
2029    0.814346
dtype: float64

2020   -2.320745
2021   -3.584566
2022   -4.552619
2023   -5.367571
2024   -6.084682
2025   -6.732295
2026   -7.327246
2027   -7.880503
2028   -8.399686
2029   -8.890343
dtype: float64

2020     3.817039
2021     5.095571
2022     6.078335
2023     6.907998
2024     7.639820
2025     8.302144
2026     8.911805
2027     9.479773
2028    10.013667
2029    10.519035
dtype: float64

Using FB Prophet

In [23]:
from fbprophet import Prophet
import pandas as pd

df = read_csv(filename)
#df = df.set_index('Year')

print(df.head())
#print(); print(df[['Year', 'Population']])

df["End_Year"] = 0
for i in range(0, len(df)):
    df.iloc[i, 2] = str(df.iloc[i, 0]) + '-12-' + '31'

print(); print(df.head())
   Year  Inflation_ConsumerPrice_Annual_Percentage
0  1960                                   0.299467
1  1961                                   0.992676
2  1962                                   1.404607
3  1963                                   2.148003
4  1964                                   4.168761

   Year  Inflation_ConsumerPrice_Annual_Percentage    End_Year
0  1960                                   0.299467  1960-12-31
1  1961                                   0.992676  1961-12-31
2  1962                                   1.404607  1962-12-31
3  1963                                   2.148003  1963-12-31
4  1964                                   4.168761  1964-12-31
In [24]:
# Create a new Data Frame
df_pop = pd.DataFrame()

df_pop[['ds','y']] = df[['End_Year', 'Inflation_ConsumerPrice_Annual_Percentage']]

# Convert Data Frame to FBProphet Timeseries ds and y
df_pop['ds'] = pd.to_datetime(df_pop['ds'])
df_pop['y']  = pd.to_numeric(df_pop['y'])

print(df_pop.tail())

# Create FBProphet Model with Dataset
m = Prophet(daily_seasonality=False, weekly_seasonality=True, yearly_seasonality=True)
m.fit(df_pop)

future = m.make_future_dataframe(periods=10, freq = 'Y')

print()
print(future.tail(26))

forecast = m.predict(future)
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(25))

fig = m.plot(forecast)
plt.show()


# Save Data in a CSV file
df_final = pd.DataFrame()
df_final[['Year', 'yhat', 'yhat_lower', 'yhat_upper']] = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]

df_final['Inflation_ConsumerPrice_Annual_Percentage'] = df_pop['y']

print(df_final.head(len(df_final)))

#df_final.to_csv('Forecast_final.csv',index = False)
           ds         y
56 2016-12-31  1.973853
57 2017-12-31  2.125971
58 2018-12-31  2.053165
59 2019-12-31  1.436820
60 2020-12-31  0.740792

           ds
45 2005-12-31
46 2006-12-31
47 2007-12-31
48 2008-12-31
49 2009-12-31
50 2010-12-31
51 2011-12-31
52 2012-12-31
53 2013-12-31
54 2014-12-31
55 2015-12-31
56 2016-12-31
57 2017-12-31
58 2018-12-31
59 2019-12-31
60 2020-12-31
61 2021-12-31
62 2022-12-31
63 2023-12-31
64 2024-12-31
65 2025-12-31
66 2026-12-31
67 2027-12-31
68 2028-12-31
69 2029-12-31
70 2030-12-31
           ds      yhat  yhat_lower  yhat_upper
46 2006-12-31  1.503624   -1.466426    4.847125
47 2007-12-31  2.511560   -0.399910    5.470913
48 2008-12-31  3.025414   -0.063541    5.985459
49 2009-12-31  1.387412   -1.833339    4.554075
50 2010-12-31  2.816651   -0.479951    5.982410
51 2011-12-31  1.972561   -0.878086    5.224775
52 2012-12-31  2.252771   -0.765940    5.238069
53 2013-12-31  2.063839   -1.039706    4.916042
54 2014-12-31  2.585012   -0.218574    5.484424
55 2015-12-31  0.952656   -2.176570    4.189568
56 2016-12-31  1.713771   -1.312400    4.892512
57 2017-12-31  0.807270   -2.056506    3.846732
58 2018-12-31  1.812369   -1.298127    4.757343
59 2019-12-31  1.629083   -1.245593    4.689899
60 2020-12-31  0.693867   -2.491773    3.873838
61 2021-12-31  2.120296   -1.171511    5.087140
62 2022-12-31  1.273369   -1.837960    4.205078
63 2023-12-31  0.372514   -2.644279    3.398308
64 2024-12-31  1.370294   -1.703681    4.370705
65 2025-12-31  1.888657   -1.252249    5.074942
66 2026-12-31  0.253465   -3.004188    3.492356
67 2027-12-31  1.685541   -1.314916    4.843429
68 2028-12-31  0.113725   -2.893817    3.087760
69 2029-12-31  1.116014   -1.938784    4.152572
70 2030-12-31  0.929892   -2.100542    3.694080
         Year      yhat  yhat_lower  yhat_upper  \
0  1960-12-31  5.373781    2.560184    8.697109   
1  1961-12-31  4.467306    1.392431    7.511126   
2  1962-12-31  5.472431    2.107979    8.706678   
3  1963-12-31  5.289173    2.285186    8.546976   
4  1964-12-31  4.353983    1.188551    7.489122   
..        ...       ...         ...         ...   
66 2026-12-31  0.253465   -3.004188    3.492356   
67 2027-12-31  1.685541   -1.314916    4.843429   
68 2028-12-31  0.113725   -2.893817    3.087760   
69 2029-12-31  1.116014   -1.938784    4.152572   
70 2030-12-31  0.929892   -2.100542    3.694080   

    Inflation_ConsumerPrice_Annual_Percentage  
0                                    0.299467  
1                                    0.992676  
2                                    1.404607  
3                                    2.148003  
4                                    4.168761  
..                                        ...  
66                                        NaN  
67                                        NaN  
68                                        NaN  
69                                        NaN  
70                                        NaN  

[71 rows x 5 columns]
In [ ]: