Time Series Forecasting - Box-Jekins Methon (ARIMA) in Python and

Time Series Forecasting - FBProphet in Python

Denmark Inflation - Consumer Price Annual Percentage Forecasting

In [3]:
# ignore warnings
import pandas as pd
import warnings
warnings.filterwarnings("ignore")

Load dataset

In [4]:
# load dataset
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot
from pandas.plotting import autocorrelation_plot

filename = 'Denmark_Inflation_ConsumerPriceData_Annual_percentages.csv'

df = read_csv(filename)
df = df.set_index('Year')

df.plot(figsize = (8,6))

fig = pyplot.figure(figsize = (8,6))
autocorrelation_plot(df)
pyplot.show()

print(df.head(5))
      Inflation_ConsumerPrice_Annual_Percentage
Year                                           
1960                                   1.255230
1961                                   3.451629
1962                                   7.377820
1963                                   6.105033
1964                                   3.092784

Autocorrelation and Partial Autocorrelation in Python

In [5]:
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt

plt.rcParams.update({'figure.figsize':(6,4), 'figure.dpi':120})

plot_acf(df)
pyplot.show()
In [6]:
from statsmodels.graphics.tsaplots import plot_pacf

plot_pacf(df)
pyplot.show()

ADF test

In [7]:
from statsmodels.tsa.stattools import adfuller

# ADF Test
def adf_test(series):
    result = adfuller(series, autolag='AIC')
    print(); print(f'ADF Statistic: {result[0]}')
    print();  print(f'n_lags: {result[1]}')
    print();  print(f'p-value: {result[1]}')

    print(); print('Critial Values:')
    for key, value in result[4].items():
        print(f'   {key}, {value}')   

adf_test(df["Inflation_ConsumerPrice_Annual_Percentage"])
ADF Statistic: -1.0187820813695128

n_lags: 0.7463524795436033

p-value: 0.7463524795436033

Critial Values:
   1%, -3.568485864
   5%, -2.92135992
   10%, -2.5986616

KPSS Test

In [8]:
from statsmodels.tsa.stattools import kpss

def kpss_test(series, **kw):    
    
    statistic, p_value, n_lags, critical_values = kpss(series, **kw)
    
    # Format Output
    print(); print(f'KPSS Statistic: {statistic}')
    print(); print(f'p-value: {p_value}')
    print(); print(f'num lags: {n_lags}')
    print(); print('Critial Values:')
    for key, value in critical_values.items():
        print(f'   {key} : {value}')
    
kpss_test(df["Inflation_ConsumerPrice_Annual_Percentage"])
KPSS Statistic: 0.4078646728725161

p-value: 0.07376522721012238

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

How to find the order of differencing (d) in ARIMA model

In [9]:
import numpy as np, pandas as pd
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.figsize':(12,14), 'figure.dpi':120})

# Import data
#df = pd.read_csv('shampoo.csv', header=0, names = ['Sales'])
df.reset_index(drop=True, inplace=True)

# Original Series
fig, axes = plt.subplots(5, 2, sharex=True)
axes[0, 0].plot(df.values); axes[0, 0].set_title('Original Series')
plot_acf(df.values, ax=axes[0, 1])

# 1st Differencing
df1 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff()
axes[1, 0].plot(df1); axes[1, 0].set_title('1st Order Differencing')
plot_acf(df1.dropna(), ax=axes[1, 1])

# 2nd Differencing
df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff()
axes[2, 0].plot(df2); axes[2, 0].set_title('2nd Order Differencing')
plot_acf(df2.dropna(), ax=axes[2, 1])

# 3rd Differencing
df3 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff()
axes[3, 0].plot(df3); axes[3, 0].set_title('3rd Order Differencing')
plot_acf(df3.dropna(), ax=axes[3, 1])

# 3rd Differencing
df4 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff().diff()
axes[4, 0].plot(df4); axes[4, 0].set_title('4th Order Differencing')
plot_acf(df4.dropna(), ax=axes[4, 1])

plt.show()

ADF and KPSS statistics

In [10]:
warnings.filterwarnings("ignore")

print("---------------------------------------------")
print("First Diffencing: ")
print("---------------------------------------------")
df1 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff()
print(); print("---------------------------------------------")
adf_test(df1.dropna())
print(); print("---------------------------------------------")
kpss_test(df1.dropna())
print(); print("---------------------------------------------")


print(); print("---------------------------------------------")
print("2nd Diffencing: ")
print("---------------------------------------------")
df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff()
print(); print("---------------------------------------------")
adf_test(df2.dropna())
print(); print("---------------------------------------------")
kpss_test(df2.dropna())
print(); print("---------------------------------------------")

print(); print("---------------------------------------------")
print("3rd Diffencing: ")
print("---------------------------------------------")
df3 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff()
print(); print("---------------------------------------------")
adf_test(df3.dropna())
print(); print("---------------------------------------------")
kpss_test(df3.dropna())
print(); print("---------------------------------------------")

print(); print("---------------------------------------------")
print("4th Diffencing: ")
print("---------------------------------------------")
df4 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff().diff().diff()
print(); print("---------------------------------------------")
adf_test(df4.dropna())
print(); print("---------------------------------------------")
kpss_test(df4.dropna())
print(); print("---------------------------------------------")
---------------------------------------------
First Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -2.2692534393301624

n_lags: 0.18207613690808988

p-value: 0.18207613690808988

Critial Values:
   1%, -3.5745892596209488
   5%, -2.9239543084490744
   10%, -2.6000391840277777

---------------------------------------------

KPSS Statistic: 0.22624094470506997

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
2nd Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -3.384516067347987

n_lags: 0.011498180158395955

p-value: 0.011498180158395955

Critial Values:
   1%, -3.5745892596209488
   5%, -2.9239543084490744
   10%, -2.6000391840277777

---------------------------------------------

KPSS Statistic: 0.147282900624081

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
3rd Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -5.549720818778188

n_lags: 1.630192005413037e-06

p-value: 1.630192005413037e-06

Critial Values:
   1%, -3.5812576580093696
   5%, -2.9267849124681518
   10%, -2.6015409829867675

---------------------------------------------

KPSS Statistic: 0.13524505664159994

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------

---------------------------------------------
4th Diffencing: 
---------------------------------------------

---------------------------------------------

ADF Statistic: -6.587413982879218

n_lags: 7.2560053615966295e-09

p-value: 7.2560053615966295e-09

Critial Values:
   1%, -3.584828853223594
   5%, -2.9282991495198907
   10%, -2.6023438271604937

---------------------------------------------

KPSS Statistic: 0.17912950785953932

p-value: 0.1

num lags: 11

Critial Values:
   10% : 0.347
   5% : 0.463
   2.5% : 0.574
   1% : 0.739

---------------------------------------------
In [ ]:
 

How to find the order of the AR term (p)

In [11]:
from statsmodels.graphics.tsaplots import plot_pacf

# PACF plot 
plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)

df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff() #.diff() #.diff()

axes[0].plot(df2); axes[0].set_title('2nd Differencing')
axes[1].set(ylim=(-3,3))
plot_pacf(df2.dropna(), ax=axes[1]) #PACF

plt.show()

How to find the order of the MA term (q)

In [12]:
from statsmodels.graphics.tsaplots import plot_acf
import matplotlib.pyplot as plt

plt.rcParams.update({'figure.figsize':(9,3), 'figure.dpi':120})
fig, axes = plt.subplots(1, 2, sharex=True)

df2 = df["Inflation_ConsumerPrice_Annual_Percentage"].diff().diff() #.diff() #.diff()
axes[0].plot(df2); axes[0].set_title('2nd Differencing')
#axes[1].set(ylim=(0,1.2))
plot_acf(df2.dropna(), ax=axes[1]) # ACF

plt.show()
In [13]:
## ADF test
adf_test(df2.dropna())
ADF Statistic: -3.384516067347987

n_lags: 0.011498180158395955

p-value: 0.011498180158395955

Critial Values:
   1%, -3.5745892596209488
   5%, -2.9239543084490744
   10%, -2.6000391840277777

Build the ARIMA(p,d,q) Model

In [14]:
from statsmodels.tsa.arima_model import ARIMA

plt.rcParams.update({'figure.figsize':(16,6), 'figure.dpi':220})

df = read_csv(filename)
df = df.set_index('Year')

# ARIMA Model
model = ARIMA(df["Inflation_ConsumerPrice_Annual_Percentage"], order=(2,2,0))
model_fit = model.fit(disp=0)
print(model_fit.summary())

# Plot residual errors
residuals = pd.DataFrame(model_fit.resid)
fig, ax = plt.subplots(1,2)
residuals.plot(title="Residuals", ax=ax[0])
residuals.plot(kind='kde', title='Density', ax=ax[1])
plt.show()

# Actual vs Fitted
model_fit.plot_predict(dynamic=False)
plt.show()
                                          ARIMA Model Results                                           
========================================================================================================
Dep. Variable:     D2.Inflation_ConsumerPrice_Annual_Percentage   No. Observations:                   59
Model:                                           ARIMA(2, 2, 0)   Log Likelihood                -128.440
Method:                                                 css-mle   S.D. of innovations              2.121
Date:                                          Mon, 02 Aug 2021   AIC                            264.880
Time:                                                  17:08:57   BIC                            273.191
Sample:                                                       2   HQIC                           268.124
                                                                                                        
======================================================================================================================
                                                         coef    std err          z      P>|z|      [0.025      0.975]
----------------------------------------------------------------------------------------------------------------------
const                                                 -0.0384      0.131     -0.293      0.770      -0.295       0.218
ar.L1.D2.Inflation_ConsumerPrice_Annual_Percentage    -0.6607      0.114     -5.813      0.000      -0.884      -0.438
ar.L2.D2.Inflation_ConsumerPrice_Annual_Percentage    -0.4752      0.115     -4.129      0.000      -0.701      -0.250
                                    Roots                                    
=============================================================================
                  Real          Imaginary           Modulus         Frequency
-----------------------------------------------------------------------------
AR.1           -0.6952           -1.2732j            1.4507           -0.3295
AR.2           -0.6952           +1.2732j            1.4507            0.3295
-----------------------------------------------------------------------------
In [ ]:
 

using Auto ARIMA

In [15]:
import pmdarima as pm

model_with_auto_d = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, # maximum p, q
                      m=1,              # frequency of series
                      
                      d=None,           # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_auto_d.summary())
Fit ARIMA: order=(1, 1, 1); AIC=nan, BIC=nan, Fit time=nan seconds
Fit ARIMA: order=(0, 1, 0); AIC=245.616, BIC=249.805, Fit time=0.002 seconds
Fit ARIMA: order=(1, 1, 0); AIC=246.850, BIC=253.133, Fit time=0.013 seconds
Fit ARIMA: order=(0, 1, 1); AIC=246.009, BIC=252.292, Fit time=0.013 seconds
Total fit time: 0.044 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                    D.y   No. Observations:                   60
Model:                 ARIMA(0, 1, 0)   Log Likelihood                -120.808
Method:                           css   S.D. of innovations              1.812
Date:                Mon, 02 Aug 2021   AIC                            245.616
Time:                        17:08:59   BIC                            249.805
Sample:                             1   HQIC                           247.255
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0139      0.234     -0.059      0.953      -0.472       0.445
==============================================================================
In [16]:
model_with_d_equals_1 = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, max_d=4, # maximum p, q and d
                      m=1,              # frequency of series
                      
                      d=1,              # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_d_equals_1.summary())
Fit ARIMA: order=(1, 1, 1); AIC=nan, BIC=nan, Fit time=nan seconds
Fit ARIMA: order=(0, 1, 0); AIC=245.616, BIC=249.805, Fit time=0.002 seconds
Fit ARIMA: order=(1, 1, 0); AIC=246.850, BIC=253.133, Fit time=0.013 seconds
Fit ARIMA: order=(0, 1, 1); AIC=246.009, BIC=252.292, Fit time=0.013 seconds
Total fit time: 0.040 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                    D.y   No. Observations:                   60
Model:                 ARIMA(0, 1, 0)   Log Likelihood                -120.808
Method:                           css   S.D. of innovations              1.812
Date:                Mon, 02 Aug 2021   AIC                            245.616
Time:                        17:08:59   BIC                            249.805
Sample:                             1   HQIC                           247.255
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0139      0.234     -0.059      0.953      -0.472       0.445
==============================================================================
In [17]:
model_with_d_equals_2 = pm.auto_arima(df, start_p=1, start_q=1,
                      test='adf',       # use adftest to find optimal 'd'
                      max_p=4, max_q=4, # maximum p, q
                      m=1,              # frequency of series
                      
                      d=2,              # let model determine 'd'
                      
                      seasonal=False,   # No Seasonality
                      start_P=0, 
                      D=0, 
                      trace=True,
                      error_action='ignore',  
                      suppress_warnings=True, 
                      stepwise=True)

print(model_with_d_equals_2.summary())
Fit ARIMA: order=(1, 2, 1); AIC=249.182, BIC=257.492, Fit time=0.092 seconds
Fit ARIMA: order=(0, 2, 0); AIC=289.056, BIC=293.211, Fit time=0.010 seconds
Fit ARIMA: order=(1, 2, 0); AIC=277.544, BIC=283.776, Fit time=0.014 seconds
Fit ARIMA: order=(0, 2, 1); AIC=247.870, BIC=254.103, Fit time=0.048 seconds
Fit ARIMA: order=(0, 2, 2); AIC=nan, BIC=nan, Fit time=nan seconds
Fit ARIMA: order=(1, 2, 2); AIC=251.872, BIC=262.260, Fit time=0.226 seconds
Total fit time: 0.392 seconds
                             ARIMA Model Results                              
==============================================================================
Dep. Variable:                   D2.y   No. Observations:                   59
Model:                 ARIMA(0, 2, 1)   Log Likelihood                -120.935
Method:                       css-mle   S.D. of innovations              1.815
Date:                Mon, 02 Aug 2021   AIC                            247.870
Time:                        17:08:59   BIC                            254.103
Sample:                             2   HQIC                           250.303
                                                                              
==============================================================================
                 coef    std err          z      P>|z|      [0.025      0.975]
------------------------------------------------------------------------------
const         -0.0122      0.014     -0.900      0.372      -0.039       0.014
ma.L1.D2.y    -1.0000      0.051    -19.566      0.000      -1.100      -0.900
                                    Roots                                    
=============================================================================
                  Real          Imaginary           Modulus         Frequency
-----------------------------------------------------------------------------
MA.1            1.0000           +0.0000j            1.0000            0.0000
-----------------------------------------------------------------------------

How to interpret the residual plots in ARIMA model

In [18]:
model_with_auto_d.plot_diagnostics(figsize=(12,10))
plt.show()
In [19]:
model_with_d_equals_1.plot_diagnostics(figsize=(12,10))
plt.show()
In [20]:
model_with_d_equals_2.plot_diagnostics(figsize=(12,10))
plt.show()

Forecast

In [21]:
model = model_with_auto_d
In [22]:
# Forecast
n_periods = 10
fc, confint = model.predict(n_periods=n_periods, return_conf_int=True)
#index_of_fc = np.arange(len(df), len(df)+n_periods)
index_of_fc = np.arange(2020, 2020+n_periods)

# make series for plotting purpose
fc_series = pd.Series(fc, index=index_of_fc)
lower_series = pd.Series(confint[:, 0], index=index_of_fc)
upper_series = pd.Series(confint[:, 1], index=index_of_fc)

# Plot
plt.plot(df)
plt.plot(fc_series, color='darkgreen')
plt.fill_between(lower_series.index, 
                 lower_series, 
                 upper_series, 
                 color='k', alpha=.15)

plt.title("Final Forecast")
plt.show()

print(); print(fc_series)
print(); print(lower_series)
print(); print(upper_series)
2020    0.406803
2021    0.392895
2022    0.378986
2023    0.365077
2024    0.351169
2025    0.337260
2026    0.323352
2027    0.309443
2028    0.295534
2029    0.281626
dtype: float64

2020    -3.145002
2021    -4.630117
2022    -5.772921
2023    -6.738534
2024    -7.590910
2025    -8.362851
2026    -9.073842
2027    -9.736580
2028   -10.359882
2029   -10.950170
dtype: float64

2020     3.958609
2021     5.415906
2022     6.530894
2023     7.468688
2024     8.293247
2025     9.037371
2026     9.720546
2027    10.355466
2028    10.950951
2029    11.513421
dtype: float64

Using FB Prophet

In [23]:
from fbprophet import Prophet
import pandas as pd

df = read_csv(filename)
#df = df.set_index('Year')

print(df.head())
#print(); print(df[['Year', 'Population']])

df["End_Year"] = 0
for i in range(0, len(df)):
    df.iloc[i, 2] = str(df.iloc[i, 0]) + '-12-' + '31'

print(); print(df.head())
   Year  Inflation_ConsumerPrice_Annual_Percentage
0  1960                                   1.255230
1  1961                                   3.451629
2  1962                                   7.377820
3  1963                                   6.105033
4  1964                                   3.092784

   Year  Inflation_ConsumerPrice_Annual_Percentage    End_Year
0  1960                                   1.255230  1960-12-31
1  1961                                   3.451629  1961-12-31
2  1962                                   7.377820  1962-12-31
3  1963                                   6.105033  1963-12-31
4  1964                                   3.092784  1964-12-31
In [24]:
# Create a new Data Frame
df_pop = pd.DataFrame()

df_pop[['ds','y']] = df[['End_Year', 'Inflation_ConsumerPrice_Annual_Percentage']]

# Convert Data Frame to FBProphet Timeseries ds and y
df_pop['ds'] = pd.to_datetime(df_pop['ds'])
df_pop['y']  = pd.to_numeric(df_pop['y'])

print(df_pop.tail())

# Create FBProphet Model with Dataset
m = Prophet(daily_seasonality=False, weekly_seasonality=True, yearly_seasonality=True)
m.fit(df_pop)

future = m.make_future_dataframe(periods=10, freq = 'Y')

print()
print(future.tail(26))

forecast = m.predict(future)
print(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail(25))

fig = m.plot(forecast)
plt.show()


# Save Data in a CSV file
df_final = pd.DataFrame()
df_final[['Year', 'yhat', 'yhat_lower', 'yhat_upper']] = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]

df_final['Inflation_ConsumerPrice_Annual_Percentage'] = df_pop['y']

print(df_final.head(len(df_final)))

#df_final.to_csv('Forecast_final.csv',index = False)
           ds         y
56 2016-12-31  0.250000
57 2017-12-31  1.147132
58 2018-12-31  0.813609
59 2019-12-31  0.758132
60 2020-12-31  0.420712

           ds
45 2005-12-31
46 2006-12-31
47 2007-12-31
48 2008-12-31
49 2009-12-31
50 2010-12-31
51 2011-12-31
52 2012-12-31
53 2013-12-31
54 2014-12-31
55 2015-12-31
56 2016-12-31
57 2017-12-31
58 2018-12-31
59 2019-12-31
60 2020-12-31
61 2021-12-31
62 2022-12-31
63 2023-12-31
64 2024-12-31
65 2025-12-31
66 2026-12-31
67 2027-12-31
68 2028-12-31
69 2029-12-31
70 2030-12-31
           ds      yhat  yhat_lower  yhat_upper
46 2006-12-31  2.189510   -0.905249    5.646230
47 2007-12-31  2.892272   -0.316205    6.376764
48 2008-12-31  2.199790   -1.080795    5.487109
49 2009-12-31  1.324743   -1.980307    4.748425
50 2010-12-31  1.810479   -1.404189    5.172561
51 2011-12-31  1.462739   -2.205934    4.749864
52 2012-12-31  1.746059   -1.670171    5.181061
53 2013-12-31  1.594333   -1.565585    5.026206
54 2014-12-31  1.765680   -1.591027    5.304650
55 2015-12-31  0.797159   -2.642303    4.387163
56 2016-12-31  0.316527   -2.871974    3.647410
57 2017-12-31  0.562348   -2.755439    3.998553
58 2018-12-31  1.311949   -2.074667    4.706514
59 2019-12-31  1.066749   -1.971999    4.523010
60 2020-12-31 -0.349053   -3.733080    3.065136
61 2021-12-31  0.183317   -3.067741    3.587343
62 2022-12-31 -0.117583   -3.501404    3.317445
63 2023-12-31  0.034764   -3.227265    3.641516
64 2024-12-31 -0.079464   -3.493405    3.099086
65 2025-12-31  0.138518   -3.184977    3.499953
66 2026-12-31 -0.783163   -4.214671    2.529854
67 2027-12-31 -0.344267   -3.737721    3.111786
68 2028-12-31 -1.111449   -4.381252    2.644930
69 2029-12-31 -0.315212   -3.720018    3.212628
70 2030-12-31 -0.513574   -3.795348    2.628586
         Year      yhat  yhat_lower  yhat_upper  \
0  1960-12-31  7.786582    4.436716   11.211063   
1  1961-12-31  8.032407    4.702957   11.451007   
2  1962-12-31  8.782013    5.559859   12.054137   
3  1963-12-31  8.536816    5.144343   11.939452   
4  1964-12-31  7.121019    4.206817   10.425845   
..        ...       ...         ...         ...   
66 2026-12-31 -0.783163   -4.214671    2.529854   
67 2027-12-31 -0.344267   -3.737721    3.111786   
68 2028-12-31 -1.111449   -4.381252    2.644930   
69 2029-12-31 -0.315212   -3.720018    3.212628   
70 2030-12-31 -0.513574   -3.795348    2.628586   

    Inflation_ConsumerPrice_Annual_Percentage  
0                                    1.255230  
1                                    3.451629  
2                                    7.377820  
3                                    6.105033  
4                                    3.092784  
..                                        ...  
66                                        NaN  
67                                        NaN  
68                                        NaN  
69                                        NaN  
70                                        NaN  

[71 rows x 5 columns]
In [ ]: