Time Series Forecasting - Box-Jekins Methon (ARIMA) in Python and

Time Series Forecasting - FBProphet in Python

Canada Inflation - Consumer Price Annual Percentage Forecasting

In [3]:
# ignore warnings
import pandas as pd
import warnings
warnings.filterwarnings("ignore")

Load dataset

In [4]:
# load dataset
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot

filename = 'Canada_Inflation_ConsumerPriceData_Annual_percentages.csv'

df = read_csv(filename)
df = df.set_index('Year')

df.plot(figsize = (8,6))

fig = pyplot.figure(figsize = (8,6))
autocorrelation_plot(df)
pyplot.show()

print(df.head(5))
      Inflation_ConsumerPrice_Annual_Percentage
Year                                           
1960                                   1.358696
1961                                   1.018767
1962                                   1.061571
1963                                   1.628151
1964                                   1.912145

Autocorrelation and Partial Autocorrelation in Python

In [5]:
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt

plt.rcParams.update({'figure.figsize':(6,4), 'figure.dpi':120})

plot_acf(df)
pyplot.show()
In [6]:
from statsmodels.graphics.tsaplots import plot_pacf

plot_pacf(df)
pyplot.show()

ADF test

In [7]:
from statsmodels.tsa.stattools import adfuller

# ADF Test
def adf_test(series):
    result = adfuller(series, autolag='AIC')
    print(); print(f'ADF Statistic: {result[0]}')
    print();  print(f'n_lags: {result[1]}')
    print();  print(f'p-value: {result[1]}')

    print(); print('Critial Values:')
    for key, value in result[4].items():
        print(f'   {key}, {value}')   

adf_test(df["Inflation_ConsumerPrice_Annual_Percentage"])
ADF Statistic: -1.7975723888290407

n_lags: 0.3816675995247002

p-value: 0.3816675995247002

Critial Values:
   1%, -3.5443688564814813
   5%, -2.9110731481481484
   10%, -2.5931902777777776

KPSS Test

In [8]:
from statsmodels.tsa.stattools import kpss

def kpss_test(series, **kw):    
    
    statistic, p_value, n_lags, critical_values = kpss(series, **kw)
    
    # Format Output
    print(); print(f'KPSS Statistic: {statistic}')
    print(); print(f'p-value: {p_value}')
    print(); print(f'num lags: {n_lags}')
    print(); print('Critial Values:')
    for key, value in critical_values.items():
        print(f'   {key} : {value}')
    
kpss_test(df["Inflation_ConsumerPrice_Annual_Percentage"])
KPSS Statistic: 0.2512044744683119

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739
/home/crown/miniconda/envs/nilimeshdss/lib/python3.6/site-packages/statsmodels/tsa/stattools.py:1278: InterpolationWarning: p-value is greater than the indicated p-value
  warn("p-value is greater than the indicated p-value", InterpolationWarning)

How to find the order of differencing (d) in ARIMA model

In [9]:
import numpy as np, pandas as pd
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.figsize':(12,14), 'figure.dpi':120})

# Import data
#df = pd.read_csv('shampoo.csv', header=0, names = ['Sales'])
df.reset_index(drop=True, inplace=True)

# Original Series
fig, axes = plt.subplots(5, 2, sharex=True)
axes[0, 0].plot(df.values); axes[0, 0].set_title('Original Series')
plot_acf(df.values, ax=axes[0, 1])

# 1st Differencing
df1 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff()
axes[1, 0].plot(df1); axes[1, 0].set_title('1st Order Differencing')
plot_acf(df1.dropna(), ax=axes[1, 1])

# 2nd Differencing
df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff()
axes[2, 0].plot(df2); axes[2, 0].set_title('2nd Order Differencing')
plot_acf(df2.dropna(), ax=axes[2, 1])

# 3rd Differencing
df3 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff()
axes[3, 0].plot(df3); axes[3, 0].set_title('3rd Order Differencing')
plot_acf(df3.dropna(), ax=axes[3, 1])

# 3rd Differencing
df4 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff().diff()
axes[4, 0].plot(df4); axes[4, 0].set_title('4th Order Differencing')
plot_acf(df4.dropna(), ax=axes[4, 1])

plt.show()

ADF and KPSS statistics

In [10]:
warnings.filterwarnings("ignore")

print("---------------------------------------------")
print("First Diffencing: ")
print("---------------------------------------------")
df1 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff()
print(); print("---------------------------------------------")
adf_test(df1.dropna())
print(); print("---------------------------------------------")
kpss_test(df1.dropna())
print(); print("---------------------------------------------")


print(); print("---------------------------------------------")
print("2nd Diffencing: ")
print("---------------------------------------------")
df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff()
print(); print("---------------------------------------------")
adf_test(df2.dropna())
print(); print("---------------------------------------------")
kpss_test(df2.dropna())
print(); print("---------------------------------------------")

print(); print("---------------------------------------------")
print("3rd Diffencing: ")
print("---------------------------------------------")
df3 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff()
print(); print("---------------------------------------------")
adf_test(df3.dropna())
print(); print("---------------------------------------------")
kpss_test(df3.dropna())
print(); print("---------------------------------------------")

print(); print("---------------------------------------------")
print("4th Diffencing: ")
print("---------------------------------------------")
df4 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff().diff()
print(); print("---------------------------------------------")
adf_test(df4.dropna())
print(); print("---------------------------------------------")
kpss_test(df4.dropna())
print(); print("---------------------------------------------")
---------------------------------------------
First Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.492973146795872

n_lags: 2.154511610314937e-06

p-value: 2.154511610314937e-06

Critial Values:
   1%, -3.5506699942762414
   5%, -2.913766394626147
   10%, -2.5946240473991997

---------------------------------------------

KPSS Statistic: 0.17177544116211285

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
2nd Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.7120567481761055

n_lags: 7.261457567271296e-07

p-value: 7.261457567271296e-07

Critial Values:
   1%, -3.562878534649522
   5%, -2.918973284023669
   10%, -2.597393446745562

---------------------------------------------

KPSS Statistic: 0.11992633187363386

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
3rd Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -4.790505147004917

n_lags: 5.67067269120967e-05

p-value: 5.67067269120967e-05

Critial Values:
   1%, -3.5778480370438146
   5%, -2.925338105429433
   10%, -2.6007735310095064

---------------------------------------------

KPSS Statistic: 0.11633217793164118

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
4th Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.34336539693194

n_lags: 4.449728549819777e-06

p-value: 4.449728549819777e-06

Critial Values:
   1%, -3.584828853223594
   5%, -2.9282991495198907
   10%, -2.6023438271604937

---------------------------------------------

KPSS Statistic: 0.1171735416539255

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------
In [ ]:
 

How to find the order of the AR term (p)

In [11]:
from statsmodels.graphics.tsaplots import plot_pacf

# PACF plot 
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)

df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff() #.diff() #.diff()

axes[0].plot(df2); axes[0].set_title('2nd Differencing')
axes[1].set(ylim=(-3,3))
plot_pacf(df2.dropna(), ax=axes[1]) #PACF

plt.show()

How to find the order of the MA term (q)

In [12]:
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt

plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)

df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff() #.diff() #.diff()
axes[0].plot(df2); axes[0].set_title('2nd Differencing')
#axes[1].set(ylim=(0,1.2))
plot_acf(df2.dropna(), ax=axes[1]) # ACF

plt.show()
In [13]:
## ADF test
adf_test(df2.dropna())
ADF Statistic: -5.7120567481761055

n_lags: 7.261457567271296e-07

p-value: 7.261457567271296e-07

Critial Values:
   1%, -3.562878534649522
   5%, -2.918973284023669
   10%, -2.597393446745562

Build the ARIMA(p,d,q) Model

In [14]:
from statsmodels.tsa.arima_model import ARIMA

plt.rcParams.update({'figure.figsize':(16,6), 'figure.dpi':220})

df = read_csv(filename)
df = df.set_index('Year')

# ARIMA Model
model = ARIMA(df["Inflation_ConsumerPrice_Annual_Percentage"], order=(2,2,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())

# Plot residual errors
residuals = pd.DataFrame(model_fit.resid)
fig, ax = plt.subplots(1,2)
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind='kde', title='Density', ax=ax[1])
plt.show()

# Actual vs Fitted
model_fit.plot_predict(dynamic=False)
plt.show()
                                          ARIMA Model Results                                           
========================================================================================================
Dep. Variable:     D2.Inflation_ConsumerPrice_Annual_Percentage   No. Observations:                   59
Model:                                           ARIMA(2, 2, 0)   Log Likelihood                -115.687
Method:                                                 css-mle   S.D. of innovations              1.716
Date:                                          Fri, 30 Jul 2021   AIC                            239.375
Time:                                                  16:29:28   BIC                            247.685
Sample:                                                       2   HQIC                           242.619
                                                                                                        
======================================================================================================================
                                                         coef    std err          z      P>|z|      [0.025      0.975]
----------------------------------------------------------------------------------------------------------------------
const                                                 -0.0106      0.134     -0.079      0.937      -0.274       0.253
ar.L1.D2.Inflation_ConsumerPrice_Annual_Percentage    -0.4532      0.126     -3.584      0.001      -0.701      -0.205
ar.L2.D2.Inflation_ConsumerPrice_Annual_Percentage    -0.2254      0.125     -1.798      0.078      -0.471       0.020
                                    Roots                                    
=============================================================================
                  Real          Imaginary           Modulus         Frequency
-----------------------------------------------------------------------------
AR.1           -1.0055           -1.8509j            2.1064           -0.3292
AR.2           -1.0055           +1.8509j            2.1064            0.3292
-----------------------------------------------------------------------------
In [ ]:
 

using Auto ARIMA

In [15]:
import pmdarima as pm

model_with_auto_d = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, # maximum p, q
                      m=1,              # frequency of series
                      
                      d=None,           # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_auto_d.summary())
Fit ARIMA: order=(1, 1, 1); AIC=nan, BIC=nan, Fit time=nan seconds
Fit ARIMA: order=(0, 1, 0); AIC=215.631, BIC=219.820, Fit time=0.003 seconds
Fit ARIMA: order=(1, 1, 0); AIC=216.996, BIC=223.279, Fit time=0.013 seconds
Fit ARIMA: order=(0, 1, 1); AIC=216.836, BIC=223.119, Fit time=0.011 seconds
Total fit time: 0.040 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                    D.y   No. Observations:                   60
Model:                 ARIMA(0, 1, 0)   Log Likelihood                -105.815
Method:                           css   S.D. of innovations              1.411
Date:                Fri, 30 Jul 2021   AIC                            215.631
Time:                        16:29:31   BIC                            219.820
Sample:                             1   HQIC                           217.269
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0107      0.182     -0.059      0.953      -0.368       0.346
==============================================================================
In [16]:
model_with_d_equals_1 = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, max_d=4, # maximum p, q and d
                      m=1,              # frequency of series
                      
                      d=1,              # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_d_equals_1.summary())
Fit ARIMA: order=(1, 1, 1); AIC=nan, BIC=nan, Fit time=nan seconds
Fit ARIMA: order=(0, 1, 0); AIC=215.631, BIC=219.820, Fit time=0.002 seconds
Fit ARIMA: order=(1, 1, 0); AIC=216.996, BIC=223.279, Fit time=0.013 seconds
Fit ARIMA: order=(0, 1, 1); AIC=216.836, BIC=223.119, Fit time=0.011 seconds
Total fit time: 0.040 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                    D.y   No. Observations:                   60
Model:                 ARIMA(0, 1, 0)   Log Likelihood                -105.815
Method:                           css   S.D. of innovations              1.411
Date:                Fri, 30 Jul 2021   AIC                            215.631
Time:                        16:29:31   BIC                            219.820
Sample:                             1   HQIC                           217.269
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0107      0.182     -0.059      0.953      -0.368       0.346
==============================================================================
In [17]:
model_with_d_equals_2 = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, # maximum p, q
                      m=1,              # frequency of series
                      
                      d=2,              # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_d_equals_2.summary())
Fit ARIMA: order=(1, 2, 1); AIC=219.742, BIC=228.053, Fit time=0.059 seconds
Fit ARIMA: order=(0, 2, 0); AIC=247.149, BIC=251.304, Fit time=0.003 seconds
Fit ARIMA: order=(1, 2, 0); AIC=240.507, BIC=246.740, Fit time=0.012 seconds
Fit ARIMA: order=(0, 2, 1); AIC=218.464, BIC=224.696, Fit time=0.020 seconds
Fit ARIMA: order=(0, 2, 2); AIC=219.578, BIC=227.888, Fit time=0.048 seconds
Fit ARIMA: order=(1, 2, 2); AIC=221.536, BIC=231.923, Fit time=0.125 seconds
Total fit time: 0.267 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                   D2.y   No. Observations:                   59
Model:                 ARIMA(0, 2, 1)   Log Likelihood                -106.232
Method:                       css-mle   S.D. of innovations              1.415
Date:                Fri, 30 Jul 2021   AIC                            218.464
Time:                        16:29:31   BIC                            224.696
Sample:                             2   HQIC                           220.897
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0090      0.011     -0.855      0.396      -0.030       0.012
ma.L1.D2.y    -1.0000      0.051    -19.504      0.000      -1.100      -0.900
                                    Roots                                    
=============================================================================
                  Real          Imaginary           Modulus         Frequency
-----------------------------------------------------------------------------
MA.1            1.0000           +0.0000j            1.0000            0.0000
-----------------------------------------------------------------------------

How to interpret the residual plots in ARIMA model

In [18]:
model_with_auto_d.plot_diagnostics(figsize=(12,10))
plt.show()
In [19]:
model_with_d_equals_1.plot_diagnostics(figsize=(12,10))
plt.show()
In [20]:
model_with_d_equals_2.plot_diagnostics(figsize=(12,10))
plt.show()

Forecast

In [21]:
model = model_with_auto_d
In [22]:
# Forecast
n_periods = 10
fc, confint = model.predict(n_periods=n_periods, return_conf_int=True)
#index_of_fc = np.arange(len(df), len(df)+n_periods)
index_of_fc = np.arange(2020, 2020+n_periods)

# make series for plotting purpose
fc_series = pd.Series(fc, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)

# Plot
plt.plot(df)
plt.plot(fc_series, color='darkgreen')
plt.fill_between(lower_series.index, 
                 lower_series, 
                 upper_series, 
                 color='k', alpha=.15)

plt.title("Final Forecast")
plt.show()

print(); print(fc_series)
print(); print(lower_series)
print(); print(upper_series)
2020    0.706305
2021    0.695610
2022    0.684915
2023    0.674220
2024    0.663525
2025    0.652830
2026    0.642135
2027    0.631440
2028    0.620745
2029    0.610050
dtype: float64

2020   -2.060182
2021   -3.216793
2022   -4.106781
2023   -4.858754
2024   -5.522528
2025   -6.123651
2026   -6.677301
2027   -7.193366
2028   -7.678715
2029   -8.138349
dtype: float64

2020    3.472792
2021    4.608013
2022    5.476611
2023    6.207194
2024    6.849578
2025    7.429311
2026    7.961571
2027    8.456247
2028    8.920206
2029    9.358450
dtype: float64

Using FB Prophet

In [23]:
from fbprophet import Prophet
import pandas as pd

df = read_csv(filename)
#df = df.set_index('Year')

print(df.head())
#print(); print(df[['Year', 'Population']])

df["End_Year"] = 0
for i in range(0, len(df)):
    df.iloc[i, 2] = str(df.iloc[i, 0]) + '-12-' + '31'

print(); print(df.head())
   Year  Inflation_ConsumerPrice_Annual_Percentage
0  1960                                   1.358696
1  1961                                   1.018767
2  1962                                   1.061571
3  1963                                   1.628151
4  1964                                   1.912145

   Year  Inflation_ConsumerPrice_Annual_Percentage    End_Year
0  1960                                   1.358696  1960-12-31
1  1961                                   1.018767  1961-12-31
2  1962                                   1.061571  1962-12-31
3  1963                                   1.628151  1963-12-31
4  1964                                   1.912145  1964-12-31
In [24]:
# Create a new Data Frame
df_pop = pd.DataFrame()

df_pop[['ds','y']] = df[['End_Year', 'Inflation_ConsumerPrice_Annual_Percentage']]

# Convert Data Frame to FBProphet Timeseries ds and y
df_pop['ds'] = pd.to_datetime(df_pop['ds'])
df_pop['y']  = pd.to_numeric(df_pop['y'])

print(df_pop.tail())

# Create FBProphet Model with Dataset
m = Prophet(daily_seasonality=False, weekly_seasonality=True, yearly_seasonality=True)
m.fit(df_pop)

future = m.make_future_dataframe(periods=10, freq = 'Y')

print()
print(future.tail(26))

forecast = m.predict(future)
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(25))

fig = m.plot(forecast)
plt.show()


# Save Data in a CSV file
df_final = pd.DataFrame()
df_final[['Year', 'yhat', 'yhat_lower', 'yhat_upper']] = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]

df_final['Inflation_ConsumerPrice_Annual_Percentage'] = df_pop['y']

print(df_final.head(len(df_final)))

#df_final.to_csv('Forecast_final.csv',index = False)
           ds         y
56 2016-12-31  1.428760
57 2017-12-31  1.596884
58 2018-12-31  2.268226
59 2019-12-31  1.949269
60 2020-12-31  0.717000

           ds
45 2005-12-31
46 2006-12-31
47 2007-12-31
48 2008-12-31
49 2009-12-31
50 2010-12-31
51 2011-12-31
52 2012-12-31
53 2013-12-31
54 2014-12-31
55 2015-12-31
56 2016-12-31
57 2017-12-31
58 2018-12-31
59 2019-12-31
60 2020-12-31
61 2021-12-31
62 2022-12-31
63 2023-12-31
64 2024-12-31
65 2025-12-31
66 2026-12-31
67 2027-12-31
68 2028-12-31
69 2029-12-31
70 2030-12-31
           ds      yhat  yhat_lower  yhat_upper
46 2006-12-31  2.356120   -0.820258    5.666982
47 2007-12-31  3.141974   -0.130407    6.381995
48 2008-12-31  3.256547   -0.087277    6.739570
49 2009-12-31  1.760603   -1.530684    5.277716
50 2010-12-31  2.346060   -1.030326    6.093897
51 2011-12-31  2.097820   -1.351570    5.746997
52 2012-12-31  2.116109   -0.917915    5.455995
53 2013-12-31  1.901146   -1.440025    5.261138
54 2014-12-31  3.299418   -0.262823    6.725380
55 2015-12-31  1.758998   -1.915112    4.965092
56 2016-12-31  1.071955   -2.150908    4.452622
57 2017-12-31  1.350751   -1.960736    4.556893
58 2018-12-31  2.158979   -1.347182    5.794972
59 2019-12-31  1.899541   -1.571584    5.472205
60 2020-12-31  0.733133   -2.866012    4.317578
61 2021-12-31  1.340692   -2.027937    4.368550
62 2022-12-31  1.114825   -2.228058    4.459143
63 2023-12-31  1.349146   -2.202395    4.504832
64 2024-12-31  0.873676   -2.344742    4.098328
65 2025-12-31  2.294049   -1.105966    5.874749
66 2026-12-31  0.776003   -2.505651    4.249780
67 2027-12-31  1.339086   -2.242304    4.447439
68 2028-12-31  0.323281   -3.216013    3.840507
69 2029-12-31  1.153610   -2.488313    4.479021
70 2030-12-31  0.916547   -2.747790    4.278262
         Year      yhat  yhat_lower  yhat_upper  \
0  1960-12-31  5.051006    1.476586    8.540856   
1  1961-12-31  5.329845    1.868772    8.686989   
2  1962-12-31  6.138115    2.913853    9.596869   
3  1963-12-31  5.878718    2.510405    9.351771   
4  1964-12-31  4.712352    1.661007    8.216610   
..        ...       ...         ...         ...   
66 2026-12-31  0.776003   -2.505651    4.249780   
67 2027-12-31  1.339086   -2.242304    4.447439   
68 2028-12-31  0.323281   -3.216013    3.840507   
69 2029-12-31  1.153610   -2.488313    4.479021   
70 2030-12-31  0.916547   -2.747790    4.278262   

    Inflation_ConsumerPrice_Annual_Percentage  
0                                    1.358696  
1                                    1.018767  
2                                    1.061571  
3                                    1.628151  
4                                    1.912145  
..                                        ...  
66                                        NaN  
67                                        NaN  
68                                        NaN  
69                                        NaN  
70                                        NaN  

[71 rows x 5 columns]
In [ ]: