#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
estbern.py

Purpose:
    Estimate a weird senseless bernoulli model

Version:
    1       First start

Date:
    2022/8/26

Author:
    Charles Bos
"""
###########################################################
### Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt


###########################################################
### mX= genX(iN, iK)
def genX(iN, iK):
    """
    Purpose:
        Generate the X matrix with a constant and standard normals

    Inputs:
        iN      integer, number of rows
        iK      integer, number of columns

    Return value:
        mX      iN x iK matrix, regressors; first column is all ones
    """
    mX = np.random.randn(iN, iK)
    mX[:, 0] = 1        # overwrite first column with the constant

    return mX


###########################################################
### vY= genY(vXB)
def genY(vXB):
    """
    Purpose:
        Generate the Y|Xbeta vector, from bernoulli

    Inputs:
        vXB     iN vector, X beta

    Return value:
        vY      iN boolean vector, observations (True = success)
    """
    iN = len(vXB)
    # Logistic link: P(y=1|x) = exp(x'b) / (1 + exp(x'b))
    vP = np.exp(vXB) / (1 + np.exp(vXB))
    vU = np.random.rand(iN)
    vY = vU < vP        # inverse-CDF draw: success iff uniform < p

    return vY


###########################################################
### vLL= LLBernRegr(vP, vY, mX)
def LLBernRegr(vP, vY, mX):
    """
    Purpose:
        Compute the loglikelihood of the weird Bernoulli model

    Inputs:
        vP      iK vector of parameters (beta)
        vY      iN vector of observations, 0/1 or boolean
        mX      iN x iK matrix of regressors

    Return value:
        vLL     iN vector of loglikelihood contributions
    """
    iN = len(vY)
    vY = vY.astype(bool)        # ensure boolean masks below work for 0/1 input

    vBeta = np.asarray(vP)
    vXB = mX @ vBeta
    vProb = np.exp(vXB) / (1 + np.exp(vXB))

    # Fill log(p) for successes, log(1-p) for failures, selected by mask;
    # this avoids evaluating 0*log(0) terms of the y*log(p)+(1-y)*log(1-p) form
    vLL = np.zeros(iN)
    vLL[vY] = np.log(vProb[vY])
    vLL[~vY] = np.log(1 - vProb[~vY])

    print('.', end='')          # progress indicator: one dot per LL evaluation

    return vLL


###########################################################
### df= EstimateBern(vY, mX, vBeta)
def EstimateBern(vY, mX, vBeta):
    """
    Purpose:
        Estimate the bernoulli weird model, without restrictions

    Inputs:
        vY      iN vector of observations
        mX      iN x iK matrix of regressors
        vBeta   iK vector of true parameters (reported for comparison only)

    Return value:
        df      dataframe with true (Btrue), initial (B0) and
                estimated (Bhat) parameters
    """
    (iN, iK) = mX.shape

    # OLS fit of y on X as starting values; lstsq is numerically
    # preferable to forming and inverting X'X explicitly
    vB0 = np.linalg.lstsq(mX, vY.astype(float), rcond=None)[0]

    # Negative average loglikelihood, as a function of beta only
    fnNAvgLnLBernRegr = lambda vB: -np.mean(LLBernRegr(vB, vY, mX))

    res = opt.minimize(fnNAvgLnLBernRegr, vB0)
    print('\n\nOptimising without restrictions results in ', res.message)

    df = pd.DataFrame(index=['B%i' % i for i in range(iK)])
    df['Btrue'] = vBeta
    df['B0'] = vB0
    df['Bhat'] = res.x

    return df


###########################################################
### vBetaTr= TransPars(vBeta)
def TransPars(vBeta, dLim=0.5):
    """
    Purpose:
        Transform parameters restricted to (dLim, inf) onto the
        unrestricted real line, vBetaTr = log(vBeta - dLim)

    Inputs:
        vBeta   iK vector, parameters, each element > dLim
        dLim    (optional, default 0.5) double, lower bound of restriction

    Return value:
        vBetaTr iK vector, transformed (unrestricted) parameters
    """
    vBetaTr = np.log(vBeta - dLim)

    return vBetaTr


###########################################################
### vBeta= TransbackPars(vBetaTr)
def TransbackPars(vBetaTr, dLim=0.5):
    """
    Purpose:
        Transform unrestricted parameters back to (dLim, inf),
        vBeta = exp(vBetaTr) + dLim; inverse of TransPars

    Inputs:
        vBetaTr iK vector, transformed (unrestricted) parameters
        dLim    (optional, default 0.5) double, lower bound of restriction

    Return value:
        vBeta   iK vector, restricted parameters, each element > dLim
    """
    vBeta = np.exp(vBetaTr) + dLim

    return vBeta


###########################################################
### df= EstimateBernRestr(vY, mX, vBeta, dLim)
def EstimateBernRestr(vY, mX, vBeta, dLim=0.5):
    """
    Purpose:
        Estimate the bernoulli weird model, with the restriction
        beta_k > dLim imposed through a log transformation

    Inputs:
        vY      iN vector of observations
        mX      iN x iK matrix of regressors
        vBeta   iK vector of true parameters (reported for comparison only)
        dLim    (optional, default 0.5) double, lower bound on each beta

    Return value:
        df      dataframe with true (Btrue), initial (B0) and
                estimated (Bhat) parameters
    """
    (iN, iK) = mX.shape

    # OLS starting values, shifted by dLim so that vB0 - dLim > 0 and
    # the log transformation below is well defined
    vB0 = np.linalg.lstsq(mX, vY.astype(float), rcond=None)[0] + dLim
    vB0Tr = TransPars(vB0, dLim)

    # Optimise over transformed (unrestricted) parameters.
    # BUG FIX: dLim is now passed through to TransbackPars; the original
    # used the default 0.5 inside the objective regardless of the
    # dLim argument, so any other bound was silently ignored.
    fnNAvgLnLBernRegrTr = lambda vBetaTr: -np.mean(
        LLBernRegr(TransbackPars(vBetaTr, dLim), vY, mX))

    res = opt.minimize(fnNAvgLnLBernRegrTr, vB0Tr)
    print('\n\nOptimising with restrictions results in ', res.message)

    df = pd.DataFrame(index=['B%i' % i for i in range(iK)])
    df['Btrue'] = vBeta
    df['B0'] = vB0
    df['Bhat'] = TransbackPars(res.x, dLim)

    return df


###########################################################
### main
def main():
    # Magic numbers
    iN = 10000
    vBeta = [1, 1, 1]
    iSeed = 1234

    # Initialisation
    np.random.seed(iSeed)
    vBeta = np.array(vBeta)
    iK = len(vBeta)
    mX = genX(iN, iK)
    vY = genY(mX @ vBeta)

    # Estimation
    df = EstimateBern(vY, mX, vBeta)
    dfTr = EstimateBernRestr(vY, mX, vBeta, dLim=0.5)

    # Output
    print('\nResults:\n', df)
    print('\nResults Transform:\n', dfTr)


###########################################################
### start main
if __name__ == "__main__":
    main()