Time Series Forecasting - Box-Jekins Methon (ARIMA) in Python and

Time Series Forecasting - FBProphet in Python

UK Inflation - Consumer Price Annual Percentage Forecasting

In [3]:
# ignore warnings
import pandas as pd
import warnings
warnings.filterwarnings("ignore")

Load dataset

In [4]:
# load dataset
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot

filename = 'UK_Inflation_ConsumerPriceData_Annual_percentages.csv'

df = read_csv(filename)
df = df.set_index('Year')

df.plot(figsize = (8,6))

fig = pyplot.figure(figsize = (8,6))
autocorrelation_plot(df)
pyplot.show()

print(df.head(5))
      Inflation_ConsumerPrice_Annual_Percentage
Year                                           
1960                                   1.003576
1961                                   3.447496
1962                                   4.196499
1963                                   2.018544
1964                                   3.281587

Autocorrelation and Partial Autocorrelation in Python

In [5]:
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt

plt.rcParams.update({'figure.figsize':(6,4), 'figure.dpi':120})

plot_acf(df)
pyplot.show()
In [6]:
from statsmodels.graphics.tsaplots import plot_pacf

plot_pacf(df)
pyplot.show()

ADF test

In [7]:
from statsmodels.tsa.stattools import adfuller

# ADF Test
def adf_test(series):
    result = adfuller(series, autolag='AIC')
    print(); print(f'ADF Statistic: {result[0]}')
    print();  print(f'n_lags: {result[1]}')
    print();  print(f'p-value: {result[1]}')

    print(); print('Critial Values:')
    for key, value in result[4].items():
        print(f'   {key}, {value}')   

adf_test(df["Inflation_ConsumerPrice_Annual_Percentage"])
ADF Statistic: -1.7919693400418135

n_lags: 0.38445591927534806

p-value: 0.38445591927534806

Critial Values:
   1%, -3.5552728880540942
   5%, -2.9157312396694217
   10%, -2.5956695041322315

KPSS Test

In [8]:
from statsmodels.tsa.stattools import kpss

def kpss_test(series, **kw):    
    
    statistic, p_value, n_lags, critical_values = kpss(series, **kw)
    
    # Format Output
    print(); print(f'KPSS Statistic: {statistic}')
    print(); print(f'p-value: {p_value}')
    print(); print(f'num lags: {n_lags}')
    print(); print('Critial Values:')
    for key, value in critical_values.items():
        print(f'   {key} : {value}')
    
kpss_test(df["Inflation_ConsumerPrice_Annual_Percentage"])
KPSS Statistic: 0.28575939835501823

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739
/home/crown/miniconda/envs/nilimeshdss/lib/python3.6/site-packages/statsmodels/tsa/stattools.py:1278: InterpolationWarning: p-value is greater than the indicated p-value
  warn("p-value is greater than the indicated p-value", InterpolationWarning)

How to find the order of differencing (d) in ARIMA model

In [9]:
import numpy as np, pandas as pd
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.figsize':(12,14), 'figure.dpi':120})

# Import data
#df = pd.read_csv('shampoo.csv', header=0, names = ['Sales'])
df.reset_index(drop=True, inplace=True)

# Original Series
fig, axes = plt.subplots(5, 2, sharex=True)
axes[0, 0].plot(df.values); axes[0, 0].set_title('Original Series')
plot_acf(df.values, ax=axes[0, 1])

# 1st Differencing
df1 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff()
axes[1, 0].plot(df1); axes[1, 0].set_title('1st Order Differencing')
plot_acf(df1.dropna(), ax=axes[1, 1])

# 2nd Differencing
df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff()
axes[2, 0].plot(df2); axes[2, 0].set_title('2nd Order Differencing')
plot_acf(df2.dropna(), ax=axes[2, 1])

# 3rd Differencing
df3 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff()
axes[3, 0].plot(df3); axes[3, 0].set_title('3rd Order Differencing')
plot_acf(df3.dropna(), ax=axes[3, 1])

# 3rd Differencing
df4 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff().diff()
axes[4, 0].plot(df4); axes[4, 0].set_title('4th Order Differencing')
plot_acf(df4.dropna(), ax=axes[4, 1])

plt.show()

ADF and KPSS statistics

In [10]:
warnings.filterwarnings("ignore")

print("---------------------------------------------")
print("First Diffencing: ")
print("---------------------------------------------")
df1 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff()
print(); print("---------------------------------------------")
adf_test(df1.dropna())
print(); print("---------------------------------------------")
kpss_test(df1.dropna())
print(); print("---------------------------------------------")


print(); print("---------------------------------------------")
print("2nd Diffencing: ")
print("---------------------------------------------")
df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff()
print(); print("---------------------------------------------")
adf_test(df2.dropna())
print(); print("---------------------------------------------")
kpss_test(df2.dropna())
print(); print("---------------------------------------------")

print(); print("---------------------------------------------")
print("3rd Diffencing: ")
print("---------------------------------------------")
df3 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff()
print(); print("---------------------------------------------")
adf_test(df3.dropna())
print(); print("---------------------------------------------")
kpss_test(df3.dropna())
print(); print("---------------------------------------------")

print(); print("---------------------------------------------")
print("4th Diffencing: ")
print("---------------------------------------------")
df4 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff().diff()
print(); print("---------------------------------------------")
adf_test(df4.dropna())
print(); print("---------------------------------------------")
kpss_test(df4.dropna())
print(); print("---------------------------------------------")
---------------------------------------------
First Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -2.959663664792522

n_lags: 0.038836888989189355

p-value: 0.038836888989189355

Critial Values:
   1%, -3.5552728880540942
   5%, -2.9157312396694217
   10%, -2.5956695041322315

---------------------------------------------

KPSS Statistic: 0.16664195914070512

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
2nd Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -9.367007729650966

n_lags: 7.552851281915937e-16

p-value: 7.552851281915937e-16

Critial Values:
   1%, -3.5552728880540942
   5%, -2.9157312396694217
   10%, -2.5956695041322315

---------------------------------------------

KPSS Statistic: 0.09926362893835669

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
3rd Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -7.648982723122367

n_lags: 1.8104857248746862e-11

p-value: 1.8104857248746862e-11

Critial Values:
   1%, -3.562878534649522
   5%, -2.918973284023669
   10%, -2.597393446745562

---------------------------------------------

KPSS Statistic: 0.09755873545131791

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
4th Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.677926747306364

n_lags: 8.618616648145705e-07

p-value: 8.618616648145705e-07

Critial Values:
   1%, -3.5778480370438146
   5%, -2.925338105429433
   10%, -2.6007735310095064

---------------------------------------------

KPSS Statistic: 0.10005594456307501

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------
In [ ]:
 

How to find the order of the AR term (p)

In [11]:
from statsmodels.graphics.tsaplots import plot_pacf

# PACF plot 
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)

df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff() #.diff() #.diff()

axes[0].plot(df2); axes[0].set_title('2nd Differencing')
axes[1].set(ylim=(-3,3))
plot_pacf(df2.dropna(), ax=axes[1]) #PACF

plt.show()

How to find the order of the MA term (q)

In [12]:
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt

plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)

df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff() #.diff() #.diff()
axes[0].plot(df2); axes[0].set_title('2nd Differencing')
#axes[1].set(ylim=(0,1.2))
plot_acf(df2.dropna(), ax=axes[1]) # ACF

plt.show()
In [13]:
## ADF test
adf_test(df2.dropna())
ADF Statistic: -9.367007729650966

n_lags: 7.552851281915937e-16

p-value: 7.552851281915937e-16

Critial Values:
   1%, -3.5552728880540942
   5%, -2.9157312396694217
   10%, -2.5956695041322315

Build the ARIMA(p,d,q) Model

In [14]:
from statsmodels.tsa.arima_model import ARIMA

plt.rcParams.update({'figure.figsize':(16,6), 'figure.dpi':220})

df = read_csv(filename)
df = df.set_index('Year')

# ARIMA Model
model = ARIMA(df["Inflation_ConsumerPrice_Annual_Percentage"], order=(2,2,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())

# Plot residual errors
residuals = pd.DataFrame(model_fit.resid)
fig, ax = plt.subplots(1,2)
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind='kde', title='Density', ax=ax[1])
plt.show()

# Actual vs Fitted
model_fit.plot_predict(dynamic=False)
plt.show()
                                          ARIMA Model Results                                           
========================================================================================================
Dep. Variable:     D2.Inflation_ConsumerPrice_Annual_Percentage   No. Observations:                   59
Model:                                           ARIMA(2, 2, 0)   Log Likelihood                -155.151
Method:                                                 css-mle   S.D. of innovations              3.347
Date:                                          Wed, 04 Aug 2021   AIC                            318.302
Time:                                                  16:38:08   BIC                            326.612
Sample:                                                       2   HQIC                           321.546
                                                                                                        
======================================================================================================================
                                                         coef    std err          z      P>|z|      [0.025      0.975]
----------------------------------------------------------------------------------------------------------------------
const                                                 -0.0329      0.249     -0.132      0.896      -0.521       0.455
ar.L1.D2.Inflation_ConsumerPrice_Annual_Percentage    -0.5078      0.125     -4.051      0.000      -0.753      -0.262
ar.L2.D2.Inflation_ConsumerPrice_Annual_Percentage    -0.2595      0.124     -2.085      0.042      -0.504      -0.016
                                    Roots                                    
=============================================================================
                  Real          Imaginary           Modulus         Frequency
-----------------------------------------------------------------------------
AR.1           -0.9782           -1.7018j            1.9629           -0.3330
AR.2           -0.9782           +1.7018j            1.9629            0.3330
-----------------------------------------------------------------------------
In [ ]:
 

using Auto ARIMA

In [15]:
import pmdarima as pm

model_with_auto_d = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, # maximum p, q
                      m=1,              # frequency of series
                      
                      d=None,           # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_auto_d.summary())
Fit ARIMA: order=(1, 1, 1); AIC=nan, BIC=nan, Fit time=nan seconds
Fit ARIMA: order=(0, 1, 0); AIC=292.370, BIC=296.559, Fit time=0.003 seconds
Fit ARIMA: order=(1, 1, 0); AIC=294.370, BIC=300.653, Fit time=0.012 seconds
Fit ARIMA: order=(0, 1, 1); AIC=294.370, BIC=300.653, Fit time=0.009 seconds
Total fit time: 0.038 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                    D.y   No. Observations:                   60
Model:                 ARIMA(0, 1, 0)   Log Likelihood                -144.185
Method:                           css   S.D. of innovations              2.676
Date:                Wed, 04 Aug 2021   AIC                            292.370
Time:                        16:38:10   BIC                            296.559
Sample:                             1   HQIC                           294.009
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0002      0.345     -0.001      0.999      -0.677       0.677
==============================================================================
In [16]:
model_with_d_equals_1 = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, max_d=4, # maximum p, q and d
                      m=1,              # frequency of series
                      
                      d=1,              # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_d_equals_1.summary())
Fit ARIMA: order=(1, 1, 1); AIC=nan, BIC=nan, Fit time=nan seconds
Fit ARIMA: order=(0, 1, 0); AIC=292.370, BIC=296.559, Fit time=0.002 seconds
Fit ARIMA: order=(1, 1, 0); AIC=294.370, BIC=300.653, Fit time=0.012 seconds
Fit ARIMA: order=(0, 1, 1); AIC=294.370, BIC=300.653, Fit time=0.008 seconds
Total fit time: 0.037 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                    D.y   No. Observations:                   60
Model:                 ARIMA(0, 1, 0)   Log Likelihood                -144.185
Method:                           css   S.D. of innovations              2.676
Date:                Wed, 04 Aug 2021   AIC                            292.370
Time:                        16:38:10   BIC                            296.559
Sample:                             1   HQIC                           294.009
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0002      0.345     -0.001      0.999      -0.677       0.677
==============================================================================
In [17]:
model_with_d_equals_2 = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, # maximum p, q
                      m=1,              # frequency of series
                      
                      d=2,              # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_d_equals_2.summary())
Fit ARIMA: order=(1, 2, 1); AIC=296.162, BIC=304.472, Fit time=0.059 seconds
Fit ARIMA: order=(0, 2, 0); AIC=328.881, BIC=333.037, Fit time=0.002 seconds
Fit ARIMA: order=(1, 2, 0); AIC=320.472, BIC=326.704, Fit time=0.016 seconds
Fit ARIMA: order=(0, 2, 1); AIC=294.172, BIC=300.405, Fit time=0.044 seconds
Fit ARIMA: order=(0, 2, 2); AIC=296.157, BIC=304.467, Fit time=0.091 seconds
Fit ARIMA: order=(1, 2, 2); AIC=nan, BIC=nan, Fit time=nan seconds
Total fit time: 0.226 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                   D2.y   No. Observations:                   59
Model:                 ARIMA(0, 2, 1)   Log Likelihood                -144.086
Method:                       css-mle   S.D. of innovations              2.687
Date:                Wed, 04 Aug 2021   AIC                            294.172
Time:                        16:38:10   BIC                            300.405
Sample:                             2   HQIC                           296.605
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0139      0.020     -0.693      0.491      -0.053       0.025
ma.L1.D2.y    -1.0000      0.049    -20.411      0.000      -1.096      -0.904
                                    Roots                                    
=============================================================================
                  Real          Imaginary           Modulus         Frequency
-----------------------------------------------------------------------------
MA.1            1.0000           +0.0000j            1.0000            0.0000
-----------------------------------------------------------------------------

How to interpret the residual plots in ARIMA model

In [18]:
model_with_auto_d.plot_diagnostics(figsize=(12,10))
plt.show()
In [19]:
model_with_d_equals_1.plot_diagnostics(figsize=(12,10))
plt.show()
In [20]:
model_with_d_equals_2.plot_diagnostics(figsize=(12,10))
plt.show()

Forecast

In [21]:
model = model_with_auto_d
In [22]:
# Forecast
n_periods = 10
fc, confint = model.predict(n_periods=n_periods, return_conf_int=True)
#index_of_fc = np.arange(len(df), len(df)+n_periods)
index_of_fc = np.arange(2020, 2020+n_periods)

# make series for plotting purpose
fc_series = pd.Series(fc, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)

# Plot
plt.plot(df)
plt.plot(fc_series, color='darkgreen')
plt.fill_between(lower_series.index, 
                 lower_series, 
                 upper_series, 
                 color='k', alpha=.15)

plt.title("Final Forecast")
plt.show()

print(); print(fc_series)
print(); print(lower_series)
print(); print(upper_series)
2020    0.989252
2021    0.989017
2022    0.988782
2023    0.988547
2024    0.988313
2025    0.988078
2026    0.987843
2027    0.987608
2028    0.987373
2029    0.987139
dtype: float64

2020    -4.254697
2021    -6.427047
2022    -8.094004
2023    -9.499351
2024   -10.737514
2025   -11.856922
2026   -12.886343
2027   -13.844520
2028   -14.744474
2029   -15.595685
dtype: float64

2020     6.233201
2021     8.405081
2022    10.071569
2023    11.476446
2024    12.714140
2025    13.833078
2026    14.862029
2027    15.819737
2028    16.719221
2029    17.569962
dtype: float64

Using FB Prophet

In [23]:
from fbprophet import Prophet
import pandas as pd

df = read_csv(filename)
#df = df.set_index('Year')

print(df.head())
#print(); print(df[['Year', 'Population']])

df["End_Year"] = 0
for i in range(0, len(df)):
    df.iloc[i, 2] = str(df.iloc[i, 0]) + '-12-' + '31'

print(); print(df.head())
   Year  Inflation_ConsumerPrice_Annual_Percentage
0  1960                                   1.003576
1  1961                                   3.447496
2  1962                                   4.196499
3  1963                                   2.018544
4  1964                                   3.281587

   Year  Inflation_ConsumerPrice_Annual_Percentage    End_Year
0  1960                                   1.003576  1960-12-31
1  1961                                   3.447496  1961-12-31
2  1962                                   4.196499  1962-12-31
3  1963                                   2.018544  1963-12-31
4  1964                                   3.281587  1964-12-31
In [24]:
# Create a new Data Frame
df_pop = pd.DataFrame()

df_pop[['ds','y']] = df[['End_Year', 'Inflation_ConsumerPrice_Annual_Percentage']]

# Convert Data Frame to FBProphet Timeseries ds and y
df_pop['ds'] = pd.to_datetime(df_pop['ds'])
df_pop['y']  = pd.to_numeric(df_pop['y'])

print(df_pop.tail())

# Create FBProphet Model with Dataset
m = Prophet(daily_seasonality=False, weekly_seasonality=True, yearly_seasonality=True)
m.fit(df_pop)

future = m.make_future_dataframe(periods=10, freq = 'Y')

print()
print(future.tail(26))

forecast = m.predict(future)
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(25))

fig = m.plot(forecast)
plt.show()


# Save Data in a CSV file
df_final = pd.DataFrame()
df_final[['Year', 'yhat', 'yhat_lower', 'yhat_upper']] = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]

df_final['Inflation_ConsumerPrice_Annual_Percentage'] = df_pop['y']

print(df_final.head(len(df_final)))

#df_final.to_csv('Forecast_final.csv',index = False)
           ds         y
56 2016-12-31  1.008417
57 2017-12-31  2.557756
58 2018-12-31  2.292840
59 2019-12-31  1.738105
60 2020-12-31  0.989487

           ds
45 2005-12-31
46 2006-12-31
47 2007-12-31
48 2008-12-31
49 2009-12-31
50 2010-12-31
51 2011-12-31
52 2012-12-31
53 2013-12-31
54 2014-12-31
55 2015-12-31
56 2016-12-31
57 2017-12-31
58 2018-12-31
59 2019-12-31
60 2020-12-31
61 2021-12-31
62 2022-12-31
63 2023-12-31
64 2024-12-31
65 2025-12-31
66 2026-12-31
67 2027-12-31
68 2028-12-31
69 2029-12-31
70 2030-12-31
           ds      yhat  yhat_lower  yhat_upper
46 2006-12-31  1.691113   -3.682345    7.050399
47 2007-12-31  3.731623   -1.410449    9.005088
48 2008-12-31  5.151807    0.134204   10.244180
49 2009-12-31  1.826116   -3.510885    6.907544
50 2010-12-31  3.165840   -2.308556    8.132842
51 2011-12-31  1.789417   -3.659509    7.036124
52 2012-12-31  2.628703   -2.911957    7.462276
53 2013-12-31  1.950644   -3.032750    7.125764
54 2014-12-31  4.668927   -0.326093    9.930935
55 2015-12-31  1.424325   -3.698731    6.497782
56 2016-12-31  0.686496   -4.414423    5.921186
57 2017-12-31  0.145765   -4.943859    5.491610
58 2018-12-31  2.145823   -2.902748    7.211277
59 2019-12-31  1.548853   -3.400342    6.920236
60 2020-12-31  0.321405   -4.919420    5.435557
61 2021-12-31  1.620492   -3.552410    6.960328
62 2022-12-31  0.203616   -5.152158    5.420393
63 2023-12-31 -0.256026   -5.791578    4.817910
64 2024-12-31  0.445932   -4.734372    5.624799
65 2025-12-31  3.123579   -2.209734    8.049673
66 2026-12-31 -0.161475   -5.621953    5.160252
67 2027-12-31  1.218701   -4.118736    6.145808
68 2028-12-31 -1.358946   -6.529173    3.585960
69 2029-12-31  0.600475   -4.617866    6.075192
70 2030-12-31 -0.036948   -5.366508    5.168195
         Year      yhat  yhat_lower  yhat_upper  \
0  1960-12-31  7.752147    1.917674   12.832587   
1  1961-12-31  7.211432    1.925463   12.386781   
2  1962-12-31  9.211505    4.041571   14.622744   
3  1963-12-31  8.614551    3.451161   13.663158   
4  1964-12-31  7.387119    2.076918   12.689042   
..        ...       ...         ...         ...   
66 2026-12-31 -0.161475   -5.621953    5.160252   
67 2027-12-31  1.218701   -4.118736    6.145808   
68 2028-12-31 -1.358946   -6.529173    3.585960   
69 2029-12-31  0.600475   -4.617866    6.075192   
70 2030-12-31 -0.036948   -5.366508    5.168195   

    Inflation_ConsumerPrice_Annual_Percentage  
0                                    1.003576  
1                                    3.447496  
2                                    4.196499  
3                                    2.018544  
4                                    3.281587  
..                                        ...  
66                                        NaN  
67                                        NaN  
68                                        NaN  
69                                        NaN  
70                                        NaN  

[71 rows x 5 columns]
In [ ]: