Stan



Please don’t mind this post. I use it to try out various highlighting styles for my code and output formats.


\begin{algorithm}
\caption{Quicksort}
\begin{algorithmic}
\PROCEDURE{Quicksort}{$A, p, r$}
    \IF{$p < r$} 
        \STATE $q = $ \CALL{Partition}{$A, p, r$}
        \STATE \CALL{Quicksort}{$A, p, q - 1$}
        \STATE \CALL{Quicksort}{$A, q + 1, r$}
    \ENDIF
\ENDPROCEDURE
\PROCEDURE{Partition}{$A, p, r$}
    \STATE $x = A[r]$
    \STATE $i = p - 1$
    \FOR{$j = p$ \TO $r - 1$}
        \IF{$A[j] < x$}
            \STATE $i = i + 1$
            \STATE exchange $A[i]$ with $A[j]$
        \ENDIF
    \ENDFOR
    \STATE exchange $A[i + 1]$ with $A[r]$
    \RETURN $i + 1$
\ENDPROCEDURE
\end{algorithmic}
\end{algorithm}
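For comparison, here is a short functional R version of the same idea (just a sketch, not the in-place CLRS partition; it copies the vector at each recursive call):

quicksort <- function(v) {
  # Base case: vectors of length 0 or 1 are already sorted
  if (length(v) <= 1) return(v)
  pivot <- v[length(v)]   # the last element plays the role of A[r]
  rest  <- v[-length(v)]
  c(quicksort(rest[rest <= pivot]), pivot, quicksort(rest[rest > pivot]))
}
quicksort(c(5, 3, 8, 1, 9, 2))
#> [1] 1 2 3 5 8 9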




\[Y^{\operatorname{Post}} = \beta_{0} + \beta_{1}^{\operatorname{Group}} + \beta_{2}^{\operatorname{Base}} + \beta_{3}^{\operatorname{Age}} + \beta_{4}^{\operatorname{Z}} + \beta_{5}^{\operatorname{R1}} + \beta_{6}^{\operatorname{R2}} + \epsilon\]
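In R, this post-treatment model corresponds to a plain lm() fit; the data frame dat and its column names below are hypothetical placeholders standing in for the symbols above:

# Hypothetical data frame `dat` with one column per symbol in the formula
fit_post <- lm(y_post ~ group + base + age + z + r1 + r2, data = dat)
summary(fit_post)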


R Markdown


This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see http://rmarkdown.rstudio.com.

When you click the Knit button, a document is generated that includes both the content and the output of any embedded R code chunks. You can embed an R code chunk like this:
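A minimal example (this chunk simply summarizes the built-in cars data set, as in the default R Markdown template):

summary(cars)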



Stan


data {
  int<lower=0> J;         // number of schools
  real y[J];              // estimated treatment effects
  real<lower=0> sigma[J]; // standard error of effect estimates
}
parameters {
  real mu;                // population treatment effect
  real<lower=0> tau;      // standard deviation in treatment effects
  vector[J] eta;          // unscaled deviation from mu by school
}
transformed parameters {
  vector[J] theta = mu + tau * eta;        // school treatment effects
}
model {
  target += normal_lpdf(eta | 0, 1);       // prior log-density
  target += normal_lpdf(y | theta, sigma); // log-likelihood
}
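A rough sketch of how this program could be run from R with rstan; the file name schools.stan and the eight-schools data values below are illustrative assumptions, not part of this post:

library(rstan)

# Illustrative eight-schools inputs (assumed values)
schools_dat <- list(
  J = 8,
  y = c(28, 8, -3, 7, -1, 1, 18, 12),
  sigma = c(15, 10, 16, 11, 9, 11, 10, 18)
)

# Compile the program above (assumed saved as "schools.stan") and sample
fit <- stan(file = "schools.stan", data = schools_dat,
            chains = 4, iter = 2000, seed = 1031)
print(fit, pars = c("mu", "tau"))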


library(tidymodels)  # rsample, parsnip, recipes, workflows, ...
library(modeltime)
library(timetk)
library(lubridate)

df1 <- read.csv("/Users/zad/Dropbox/LessLikely/ts.csv")
df1$y <- ts(df1$Sales)
df1$ds <- as.Date(df1$Time.Increment)

splits <- initial_time_split(df1, prop = 0.5)
train <- training(splits)
test <- testing(splits)
interactive <- TRUE
# Forecasting with auto.arima
library("forecast")
md <- auto.arima(train$y)
fc <- forecast(md, h = 12)

model_fit_arima_no_boost <- arima_reg() %>%
  set_engine(engine = "auto_arima") %>%
  fit(y ~ ds, data = training(splits))

# Model 2: arima_boost ----
model_fit_arima_boosted <- arima_boost(
  min_n = 2,
  learn_rate = 0.015
) %>%
  set_engine(engine = "auto_arima_xgboost") %>%
  fit(y ~ ds + as.numeric(ds) + factor(month(ds, label = TRUE),
    ordered = FALSE
  ),
  data = training(splits)
  )

# Model 3: ets ----
model_fit_ets <- exp_smoothing() %>%
  set_engine(engine = "ets") %>%
  fit(y ~ ds, data = training(splits))

model_fit_lm <- linear_reg() %>%
  set_engine("lm") %>%
  fit(y ~ as.numeric(ds) + factor(month(ds, label = TRUE),
    ordered = FALSE
  ),
  data = training(splits)
  )

# Model 4: prophet ----
model_fit_prophet <- prophet_reg() %>%
  set_engine(engine = "prophet") %>%
  fit(y ~ ds, data = training(splits))
#> Error in sampler$call_sampler(c(args, dotlist)): c++ exception (unknown reason)
# Model 6: earth ----
model_spec_mars <- mars(mode = "regression") %>%
  set_engine("earth")


recipe_spec <- recipe(y ~ ds, data = training(splits)) %>%
  step_date(ds, features = "month", ordinal = FALSE) %>%
  step_mutate(date_num = as.numeric(ds)) %>%
  step_normalize(date_num) %>%
  step_rm(ds)

wflw_fit_mars <- workflow() %>%
  add_recipe(recipe_spec) %>%
  add_model(model_spec_mars) %>%
  fit(training(splits))

models_tbl <- modeltime_table(
  model_fit_arima_no_boost,
  model_fit_arima_boosted,
  model_fit_ets,
  model_fit_prophet,
  model_fit_lm,
  wflw_fit_mars
)
#> Error in eval(expr, envir, enclos): object 'model_fit_prophet' not found

calibration_tbl <- models_tbl %>%
  modeltime_calibrate(new_data = testing(splits))
#> Error in eval(expr, envir, enclos): object 'models_tbl' not found

calibration_tbl %>%
  modeltime_forecast(
    new_data    = testing(splits),
    actual_data = df1
  ) %>%
  plot_modeltime_forecast(
    .legend_max_width = 25, # For mobile screens
    .interactive      = interactive
  )
#> Error in eval(expr, envir, enclos): object 'calibration_tbl' not found
calibration_tbl %>%
  modeltime_accuracy() %>%
  table_modeltime_accuracy(
    .interactive = FALSE
  )
#> Error in eval(expr, envir, enclos): object 'calibration_tbl' not found

refit_tbl <- calibration_tbl %>%
  modeltime_refit(data = df1)
#> Error in eval(expr, envir, enclos): object 'calibration_tbl' not found

refit_tbl %>%
  modeltime_forecast(h = "4 years", actual_data = df1) %>%
  plot_modeltime_forecast(
    .legend_max_width = 10, # For mobile screens
    .interactive      = TRUE
  )
#> Error in eval(expr, envir, enclos): object 'refit_tbl' not found

plot(greybox::forecast(smooth::adam(df1$y, h = 12, holdout = TRUE)))


plot(greybox::forecast(smooth::es(df1$y, h = 12, holdout = TRUE,
    silent = FALSE)))
#> Forming the pool of models based on... ANN , AAN , Estimation progress:    100 %... Done!


s1 <- bayesforecast::stan_naive(ts = df1$y, chains = 4, iter = 4000,
    cores = 8)

plot(s1) + theme_bw()

check_residuals(s1) + theme_light()

#> NULL
autoplot(object = forecast(s1, h = 12, biasadj = TRUE, PI = TRUE),
    include = 100) + theme_bw()

autoplot(object = forecast(s1, h = 52, biasadj = TRUE, PI = TRUE),
    include = 100) + theme_bw()

ztable(summary(s1))
           mean     se       5%      95%   ess  Rhat
mu0       -0.02   0.03    -4.84     4.78  7844     1
sigma0  4642.74   7.21  3712.68  5823.09  7882     1
loglik  -200.58   0.01  -202.78  -199.71  8084     1
(meanf(df1$y))
#>    Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
#> 22          19920 10522 29317  5129 34711
#> 23          19920 10522 29317  5129 34711
#> 24          19920 10522 29317  5129 34711
#> 25          19920 10522 29317  5129 34711
#> 26          19920 10522 29317  5129 34711
#> 27          19920 10522 29317  5129 34711
#> 28          19920 10522 29317  5129 34711
#> 29          19920 10522 29317  5129 34711
#> 30          19920 10522 29317  5129 34711
#> 31          19920 10522 29317  5129 34711
(forecast(s1, h = 12))
#>    Point Forecast Lo 0.8 Hi 0.8 Lo 0.9 Hi 0.9
#> 22          20013  14017  26012  12246  27784
#> 23          19925  14416  25429  12090  26863
#> 24          20165  14012  26321  12193  27901
#> 25          20043  14311  25718  12678  27329
#> 26          19945  14120  25720  12469  27263
#> 27          20151  13970  26095  12162  28138
#> 28          19869  14033  25877  12432  27408
#> 29          20278  14785  26191  13147  27845
#> 30          19993  13754  25961  12028  27631
#> 31          19848  14165  25341  12572  26877
#> 32          19988  14195  25921  12128  27664
#> 33          19995  13372  26372  12235  27975

autoplot(object = forecast(s1, h = 6, biasadj = TRUE, PI = TRUE),
    include = 100) + theme_bw()


df <- as.data.frame(df1)
m <- prophet(df1, growth = "linear", yearly.seasonality = "auto")
#> Error in sampler$call_sampler(c(args, dotlist)): c++ exception (unknown reason)
future <- make_future_dataframe(m, periods = 60, freq = "months",
    include_history = TRUE)
#> Error in eval(expr, envir, enclos): object 'm' not found
forecast <- predict(m, future)
#> Error in eval(expr, envir, enclos): object 'm' not found
plot(m, forecast) + theme_bw()
#> Error in h(simpleError(msg, call)): error in evaluating the argument 'x' in selecting a method for function 'plot': object 'm' not found

plot(greybox::forecast(smooth::auto.adam(df1$y, h = 12, holdout = TRUE,
    ic = "AICc", regressors = "select")))

adamAutoARIMAAir <- auto.adam(df1$y, h = 50)

plot(greybox::forecast(adam(df1$y, main = "Parametric prediction interval",
    h = 5, sim = 1000, lev = 0.99, interval = "prediction")))

plot(greybox::forecast(auto.adam(df1$y, h = 5, interval = "complete",
    nsim = 100, main = "Complete prediction interval")))


plot(greybox::forecast(adam(df1$y, c("CCN", "ANN", "AAN", "AAdN"),
    h = 10, holdout = TRUE, ic = "AICc")))


plot(greybox::forecast(adam(df1$y, model = "NNN", lags = 7, orders = c(0,
    1, 1), constant = TRUE, h = 5, interval = "complete", nsim = 100)))


plot(greybox::forecast((adam(df1$y, model = "NNN", lags = c(24,
    24 * 7, 24 * 365), orders = list(ar = c(3, 2, 2, 2), i = c(2,
    1, 1, 1), ma = c(3, 2, 2, 2), select = TRUE), initial = "backcasting"))))


adamARIMA
#> Error in eval(expr, envir, enclos): object 'adamARIMA' not found
# Apply models
adamPoolBJ <- vector("list", 3)
adamPoolBJ[[1]] <- adam(df1$y, "ZZN", h = 10, holdout = TRUE,
    ic = "BICc")
adamPoolBJ[[2]] <- adam(df1$y, "NNN", orders = list(ar = 3, i = 2,
    ma = 3, select = TRUE), h = 10, holdout = TRUE, ic = "BICc")
adamPoolBJ[[3]] <- adam(df1$y, "MMN", h = 10, holdout = TRUE,
    ic = "BICc", regressors = "select")

# Extract BICc values
adamsICs <- sapply(adamPoolBJ, BICc)

# Calculate weights
adamsICWeights <- adamsICs - min(adamsICs)
adamsICWeights[] <- exp(-0.5 * adamsICWeights)/sum(exp(-0.5 *
    adamsICWeights))
names(adamsICWeights) <- c("ETS", "ARIMA", "ETSX")
round(adamsICWeights, 3)
#>   ETS ARIMA  ETSX 
#> 0.087 0.913 0.000
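For reference, the weights printed above are the usual information-criterion weights computed by the exp(-0.5 * ...) / sum(...) line:

\[w_{i} = \frac{\exp\left(-\tfrac{1}{2}\Delta_{i}\right)}{\sum_{j}\exp\left(-\tfrac{1}{2}\Delta_{j}\right)}, \qquad \Delta_{i} = \operatorname{BICc}_{i} - \min_{j}\operatorname{BICc}_{j}\]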

adamPoolBJForecasts <- vector("list", 3)
# Produce forecasts from the three models
for (i in 1:3) {
    adamPoolBJForecasts[[i]] <- forecast(adamPoolBJ[[i]], h = 10,
        interval = "pred")
}
# Produce combined conditional means and prediction
# intervals
finalForecast <- cbind(sapply(adamPoolBJForecasts, "[[", "mean") %*%
    adamsICWeights, sapply(adamPoolBJForecasts, "[[", "lower") %*%
    adamsICWeights, sapply(adamPoolBJForecasts, "[[", "upper") %*%
    adamsICWeights)
# Give the appropriate names
colnames(finalForecast) <- c("Mean", "Lower bound (2.5%)", "Upper bound (97.5%)")
# Transform the table in the ts format (for convenience)
finalForecast <- ts(finalForecast, start = start(adamPoolBJForecasts[[i]]$mean))
finalForecast
#> Time Series:
#> Start = 12 
#> End = 21 
#> Frequency = 1 
#>     Mean Lower bound (2.5%) Upper bound (97.5%)
#> 12 20727              11290               30164
#> 13 20727               7381               34072
#> 14 20727               4382               37072
#> 15 20727               1853               39600
#> 16 20727               -374               41828
#> 17 20727              -2388               43842
#> 18 20727              -4241               45694
#> 19 20727              -5964               47418
#> 20 20727              -7583               49037
#> 21 20727              -9115               50569

graphmaker(df1$y, finalForecast[, 1], lower = finalForecast[,
    2], upper = finalForecast[, 3], level = 0.95)


plot(forecast(forecastHybrid::hybridModel(df1$y, models = "aen",
    weights = "equal", cvHorizon = 8, num.cores = 4), h = 5))


plot(forecast(forecastHybrid::hybridModel(df1$y, models = "fnst",
    weights = "equal", errorMethod = "RMSE")))


oesModel <- oes(df1$y, model = "YYY", occurrence = "auto")

y0 <- stan_sarima(df1$y, refresh = 0, verbose = FALSE, open_progress = FALSE)
autoplot(forecast(y0, 5, 0.99), "red") + ggplot2::theme_bw()
#> Error in tail.data.frame(data, include): invalid 'n' - must be numeric, possibly NA.

df1$month <- lubridate::month(df1$ds)
gam1 <- mgcv::gamm(Sales ~ s(month, bs = "cr", k = 12), data = df1,
    family = gaussian, correlation = nlme::corARMA(p = 1),
    method = "REML")
mgcv::plot.gam(gam1$gam, lwd = 3, lty = 1, col = "#d46c5b")


ts_plot(df1, title = "US Monthly Natural Gas Consumption", Ytitle = "Billion Cubic Feet")

months <- c("2022-01-01", "2022-03-01", "2022-06-01")
ubereats <- c(7327.55, 4653.53, 4833.21)
doordash <- c(1304.54, 2000.35, 1643.58)
grubhub <- c(1199.85, 941.68, 623.27)
total <- c(7222.85, 7464.68, 7100.06)

df <- data.frame(months, ubereats, doordash, grubhub, total)
df$months <- as.Date(df$months)


ts_plot(df, title = "US Monthly Natural Gas Consumption", Ytitle = "Billion Cubic Feet")

df$total <- ts(df$total)
plot(greybox::forecast(adam(df$total, h = 5, holdout = TRUE,
    ic = "AICc", regressors = "select")))
#> Error in h(simpleError(msg, call)): error in evaluating the argument 'x' in selecting a method for function 'plot': The number of in-sample observations is not positive. Cannot do anything.

m <- prophet(df, growth = "linear", yearly.seasonality = "auto")
#> Error in fit.prophet(m, df, ...): Dataframe must have columns 'ds' and 'y' with the dates and values respectively.
future <- make_future_dataframe(m, periods = 60, freq = "months",
    include_history = TRUE)
#> Error in eval(expr, envir, enclos): object 'm' not found
forecast <- predict(m, future)
#> Error in eval(expr, envir, enclos): object 'm' not found
plot(m, forecast) + theme_bw()
#> Error in h(simpleError(msg, call)): error in evaluating the argument 'x' in selecting a method for function 'plot': object 'm' not found

# Plotting actual vs. fitted and forecasted
test_forecast(actual = df1$y, forecast.obj = fc, test = test$y)
#> Error in `recycle_columns()`:
#> ! Tibble columns must have compatible sizes.
#> • Size 21: Column `x`.
#> • Size 22: Columns `y` and `text`.
#> ℹ Only values of size one are recycled.
plot(fc)


import delimited "/Users/zad/Dropbox/LessLikely/ts.csv", clear 
#> (encoding automatically selected: ISO-8859-1)
#> (5 vars, 21 obs)
#> 
#> 
#> N Child processes: 8
#> Stata dir:  /Applications/Stata/StataBE.app/Contents/MacOS/stata-be
#> 
#> 
#> Time variable: t, 1 to 21
#>         Delta: 1 unit
#> 
#> 
#> 
#> 
#> Dickey–Fuller test for unit root           Number of obs  = 20
#> Variable: sales                            Number of lags =  0
#> 
#> H0: Random walk with or without drift
#> 
#>                                        Dickey–Fuller
#>                    Test      -------- critical value ---------
#>               statistic           1%           5%          10%
#> --------------------------------------------------------------
#>  Z(t)            -3.069       -4.380       -3.600       -3.240
#> --------------------------------------------------------------
#> MacKinnon approximate p-value for Z(t) = 0.1137.
#> 
#> Regression table
#> ------------------------------------------------------------------------------
#>      D.sales | Coefficient  Std. err.      t    P>|t|     [95% conf. interval]
#> -------------+----------------------------------------------------------------
#>        sales |
#>          L1. |  -.5443834    .177365    -3.07   0.007    -.9185909   -.1701758
#>              |
#>       _trend |   113.3635   213.0871     0.53   0.602     -336.211    562.9381
#>        _cons |   10627.12   2872.968     3.70   0.002     4565.683    16688.55
#> ------------------------------------------------------------------------------
#> 
#> 
#> 
#> 
#> (setting optimization to BHHH)
#> Iteration 0:   log likelihood = -200.56293  
#> Iteration 1:   log likelihood = -199.59002  
#> Iteration 2:   log likelihood =  -199.0124  
#> Iteration 3:   log likelihood = -198.71266  
#> Iteration 4:   log likelihood = -198.61803  
#> (switching optimization to BFGS)
#> Iteration 5:   log likelihood = -198.55245  
#> Iteration 6:   log likelihood =  -198.4958  
#> Iteration 7:   log likelihood =  -198.4736  
#> Iteration 8:   log likelihood = -198.47357  
#> Iteration 9:   log likelihood = -198.47278  
#> Iteration 10:  log likelihood = -198.47278  
#> 
#> ARIMA regression
#> 
#> Sample: 2 thru 21                               Number of obs     =         20
#>                                                 Wald chi2(2)      =       1.08
#> Log likelihood = -198.4728                      Prob > chi2       =     0.5825
#> 
#> ------------------------------------------------------------------------------
#>              |                 OPG
#>      D.sales | Coefficient  std. err.      z    P>|z|     [95% conf. interval]
#> -------------+----------------------------------------------------------------
#> sales        |
#>        _cons |   942.5678   1308.584     0.72   0.471     -1622.21    3507.346
#> -------------+----------------------------------------------------------------
#> ARMA         |
#>           ar |
#>          L1. |  -.2851484   1.365128    -0.21   0.835     -2.96075    2.390453
#>              |
#>           ma |
#>          L1. |  -.0079803   1.389017    -0.01   0.995    -2.730404    2.714444
#> -------------+----------------------------------------------------------------
#>       /sigma |   4927.009   1205.311     4.09   0.000     2564.642    7289.375
#> ------------------------------------------------------------------------------
#> Note: The test of the variance against zero is one sided, and the two-sided
#>       confidence interval is truncated at zero.
#> 
#> 
#> Akaike's information criterion and Bayesian information criterion
#> 
#> -----------------------------------------------------------------------------
#>        Model |          N   ll(null)  ll(model)      df        AIC        BIC
#> -------------+---------------------------------------------------------------
#>            . |         20          .  -198.4728       4   404.9456   408.9285
#> -----------------------------------------------------------------------------
#> Note: BIC uses N = number of observations. See [R] BIC note.
#> 
#> 
#> (setting optimization to BHHH)
#> Iteration 0:   log likelihood = -212.32417  
#> Iteration 1:   log likelihood = -210.34653  
#> Iteration 2:   log likelihood = -209.35975  
#> Iteration 3:   log likelihood = -209.30035  
#> Iteration 4:   log likelihood =  -209.2976  
#> (switching optimization to BFGS)
#> Iteration 5:   log likelihood = -209.29678  
#> Iteration 6:   log likelihood =  -209.2963  
#> Iteration 7:   log likelihood = -209.29617  
#> Iteration 8:   log likelihood = -209.29616  
#> 
#> ARIMA regression
#> 
#> Sample: 1 thru 21                               Number of obs     =         21
#>                                                 Wald chi2(1)      =      40.00
#> Log likelihood = -209.2962                      Prob > chi2       =     0.0000
#> 
#> ------------------------------------------------------------------------------
#>              |                 OPG
#>        sales | Coefficient  std. err.      z    P>|z|     [95% conf. interval]
#> -------------+----------------------------------------------------------------
#> sales        |
#>        _cons |    17540.1   3291.584     5.33   0.000     11088.72    23991.49
#> -------------+----------------------------------------------------------------
#> ARMA         |
#>           ar |
#>          L1. |   .7741051   .1223945     6.32   0.000     .5342164    1.013994
#> -------------+----------------------------------------------------------------
#>       /sigma |   5043.199   718.8693     7.02   0.000     3634.241    6452.157
#> ------------------------------------------------------------------------------
#> Note: The test of the variance against zero is one sided, and the two-sided
#>       confidence interval is truncated at zero.
#> 
#> 
#> Akaike's information criterion and Bayesian information criterion
#> 
#> -----------------------------------------------------------------------------
#>        Model |          N   ll(null)  ll(model)      df        AIC        BIC
#> -------------+---------------------------------------------------------------
#>            . |         21          .  -209.2962       3   424.5923   427.7259
#> -----------------------------------------------------------------------------
#> Note: BIC uses N = number of observations. See [R] BIC note.
#> 
#> 
#> request ignored because of batch mode
#> 
#> 
#> 
#> 
#> file /Users/zad/Dropbox (Old)/LessLikely/content/post/statistics/forecastedsales.png
#>     saved as PNG format

Forecast of sales from the Stata ARIMA model (forecastedsales.png).

Python


from pandas import DataFrame
import statsmodels.api as sm
import matplotlib.pyplot as plt

Stock_Market = {'Year': [2017,2017,2017,2017,2017,
                         2017,2017,2017,2017,2017,
                         2017,2017,2016,2016,2016,
                         2016,2016,2016,2016,2016,
                         2016,2016,2016,2016],
                'Month': [12, 11,10,9,8,7,6,5,4,
                          3,2,1,12,11,10,9,8,7,6,
                          5,4,3,2,1],
                'Interest_Rate':[2.75,2.5,2.5,2.5,2.5,2.5,
                                 2.5,2.25,2.25,2.25,2,2,2,1.75,1.75,
                                 1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75,1.75],
                'Unemployment_Rate':[5.3,5.3,5.3,5.3,5.4,5.6,
                                     5.5,5.5,5.5,5.6,5.7,5.9,6,5.9,5.8,6.1,
                                     6.2,6.1,6.1,6.1,5.9,6.2,6.2,6.1],
                'Stock_Index_Price': [1464,1394,1357,1293,1256,1254,1234,1195,1159,1167,1130,
                                      1075,1047,965,943,958,971,949,884,866,876,822,704,719]        
                }

df = DataFrame(Stock_Market,columns=['Year','Month','Interest_Rate',
                                     'Unemployment_Rate','Stock_Index_Price']) 

X = df[['Interest_Rate','Unemployment_Rate']] 

# Two predictors are used here for multiple linear regression.
# For a simple linear regression with one predictor, use e.g.
# X = df['Interest_Rate']

Y = df['Stock_Index_Price']

X = sm.add_constant(X) # adding a constant

model = sm.OLS(Y, X).fit()
predictions = model.predict(X) 

print_model = model.summary()
print(print_model)
#>                             OLS Regression Results                            
#> ==============================================================================
#> Dep. Variable:      Stock_Index_Price   R-squared:                       0.898
#> Model:                            OLS   Adj. R-squared:                  0.888
#> Method:                 Least Squares   F-statistic:                     92.07
#> Date:                Mon, 16 Sep 2024   Prob (F-statistic):           4.04e-11
#> Time:                        21:41:34   Log-Likelihood:                -134.61
#> No. Observations:                  24   AIC:                             275.2
#> Df Residuals:                      21   BIC:                             278.8
#> Df Model:                           2                                         
#> Covariance Type:            nonrobust                                         
#> =====================================================================================
#>                         coef    std err          t      P>|t|      [0.025      0.975]
#> -------------------------------------------------------------------------------------
#> const              1798.4040    899.248      2.000      0.059     -71.685    3668.493
#> Interest_Rate       345.5401    111.367      3.103      0.005     113.940     577.140
#> Unemployment_Rate  -250.1466    117.950     -2.121      0.046    -495.437      -4.856
#> ==============================================================================
#> Omnibus:                        2.691   Durbin-Watson:                   0.530
#> Prob(Omnibus):                  0.260   Jarque-Bera (JB):                1.551
#> Skew:                          -0.612   Prob(JB):                        0.461
#> Kurtosis:                       3.226   Cond. No.                         394.
#> ==============================================================================
#> 
#> Notes:
#> [1] Standard Errors assume that the covariance matrix of the errors is correctly specified.

R


fit <- glm(mpg ~ cyl + disp, mtcars, family = gaussian())
# show the theoretical model
equatiomatic::extract_eq(fit)

\[ E( \operatorname{mpg} ) = \alpha + \beta_{1}(\operatorname{cyl}) + \beta_{2}(\operatorname{disp}) \]


Stata


sysuse auto2, clear
parallel initialize 8, f
mfp: glm price mpg
twoway (fpfitci price mpg, estcmd(glm) fcolor(dkorange%20) alcolor(%40))  || scatter price mpg, mcolor(dkorange) scale(0.75)
graph export "mfp.png", replace
#> . sysuse auto2, clear
#> (1978 automobile data)
#> 
#> . parallel initialize 8, f
#> N Child processes: 8
#> Stata dir:  /Applications/Stata/StataBE.app/Contents/MacOS/stata-be
#> 
#> . mfp: glm price mpg
#> 
#> Deviance for model with all terms untransformed = 1373.079, 74 observations
#> 
#> Variable     Model (vs.)   Deviance  Dev diff.   P      Powers   (vs.)
#> ----------------------------------------------------------------------
#> mpg          Lin.   FP2    1373.079    19.565  0.000+   1         -2 -2
#>              FP1           1356.927     3.413  0.182    -2        
#>              Final         1356.927                     -2
#> 
#> 
#> Transformations of covariates:
#> 
#> -> gen double Impg__1 = X^-2-.2204707671 if e(sample) 
#>    (where: X = mpg/10)
#> 
#> Final multivariable fractional polynomial model for price
#> --------------------------------------------------------------------
#>     Variable |    -----Initial-----          -----Final-----
#>              |   df     Select   Alpha    Status    df    Powers
#> -------------+------------------------------------------------------
#>          mpg |    4     1.0000   0.0500     in      2     -2
#> --------------------------------------------------------------------
#> 
#> Generalized linear models                         Number of obs   =         74
#> Optimization     : ML                             Residual df     =         72
#>                                                   Scale parameter =    5533697
#> Deviance         =  398426217.4                   (1/df) Deviance =    5533697
#> Pearson          =  398426217.4                   (1/df) Pearson  =    5533697
#> 
#> Variance function: V(u) = 1                       [Gaussian]
#> Link function    : g(u) = u                       [Identity]
#> 
#>                                                   AIC             =    18.3909
#> Log likelihood   = -678.4632599                   BIC             =   3.98e+08
#> 
#> ------------------------------------------------------------------------------
#>              |                 OIM
#>        price | Coefficient  std. err.      z    P>|z|     [95% conf. interval]
#> -------------+----------------------------------------------------------------
#>      Impg__1 |   13163.85   2013.016     6.54   0.000      9218.41    17109.29
#>        _cons |   5538.395   289.7737    19.11   0.000     4970.449    6106.341
#> ------------------------------------------------------------------------------
#> Deviance = 1356.927.
#> 
#> . twoway (fpfitci price mpg, estcmd(glm) fcolor(dkorange%20) alcolor(%40))  || scatter pri
#> > ce mpg, mcolor(dkorange) scale(0.75)
#> 
#> . graph export "mfp.png", replace
#> file /Users/zad/Dropbox (Old)/LessLikely/content/post/statistics/mfp.png saved as PNG
#>     format
#> 
#> .

Fractional polynomial fit of price against mpg (mfp.png).

clear
set obs 100
/**
Generate variables from a normal distribution like before
*/
generate x = rnormal(0, 1)
generate y = rnormal(0, 1)
/**
We set up our model here 

*/
parallel initialize 8, f
bayesmh y x, likelihood(normal({var})) prior({var}, normal(0, 10)) ///
prior({y:}, normal(0, 10)) rseed(1031) saving(coutput_pred, replace) mcmcsize(1000)
/**
We use the bayespredict command to make predictions from the model
*/
bayespredict (mean:@mean({_resid})) (var:@variance({_resid})), ///
rseed(1031) saving(coutput_pred, replace) 
/**
Then we calculate the posterior predictive P-values
*/
bayesstats ppvalues {mean} using coutput_pred
#> . clear
#> 
#> . set obs 100
#> Number of observations (_N) was 0, now 100.
#> 
#> . /**
#> > Generate variables from a normal distribution like before
#> > */
#> . generate x = rnormal(0, 1)
#> 
#> . generate y = rnormal(0, 1)
#> 
#> . /**
#> > We set up our model here 
#> > 
#> > */
#> . parallel initialize 8, f
#> N Child processes: 8
#> Stata dir:  /Applications/Stata/StataBE.app/Contents/MacOS/stata-be
#> 
#> . bayesmh y x, likelihood(normal({var})) prior({var}, normal(0, 10)) ///
#> > prior({y:}, normal(0, 10)) rseed(1031) saving(coutput_pred, replace) mcmcsize(1000)
#>   
#> Burn-in ...
#> Simulation ...
#> 
#> Model summary
#> ------------------------------------------------------------------------------
#> Likelihood: 
#>   y ~ normal(xb_y,{var})
#> 
#> Priors: 
#>   {y:x _cons} ~ normal(0,10)                                               (1)
#>         {var} ~ normal(0,10)
#> ------------------------------------------------------------------------------
#> (1) Parameters are elements of the linear form xb_y.
#> 
#> Bayesian normal regression                       MCMC iterations  =      3,500
#> Random-walk Metropolis–Hastings sampling         Burn-in          =      2,500
#>                                                  MCMC sample size =      1,000
#>                                                  Number of obs    =        100
#>                                                  Acceptance rate  =      .2068
#>                                                  Efficiency:  min =     .03878
#>                                                               avg =     .07757
#> Log marginal-likelihood = -149.68351                          max =      .1317
#>  
#> ------------------------------------------------------------------------------
#>              |                                                Equal-tailed
#>              |      Mean   Std. dev.     MCSE     Median  [95% cred. interval]
#> -------------+----------------------------------------------------------------
#> y            |
#>            x | -.0758396    .097915   .008532  -.0704775  -.2600037   .1054718
#>        _cons |  .0238076   .1107038   .014035   .0326669  -.1793756   .2293247
#> -------------+----------------------------------------------------------------
#>          var |  .9773565   .1313659   .021096   .9384637   .7882847   1.323255
#> ------------------------------------------------------------------------------
#> 
#> file coutput_pred.dta saved.
#> 
#> . /**
#> > We use the bayespredict command to make predictions from the model
#> > */
#> . bayespredict (mean:@mean({_resid})) (var:@variance({_resid})), ///
#> > rseed(1031) saving(coutput_pred, replace) 
#> 
#> Computing predictions ...
#> 
#> file coutput_pred.dta saved.
#> file coutput_pred.ster saved.
#> 
#> . /**
#> > Then we calculate the posterior predictive P-values
#> > */
#> . bayesstats ppvalues {mean} using coutput_pred
#> 
#> Posterior predictive summary   MCMC sample size =     1,000
#> 
#> -----------------------------------------------------------
#>            T |      Mean   Std. dev.  E(T_obs)  P(T>=T_obs)
#> -------------+---------------------------------------------
#>         mean |  .0004223   .0957513    .008651         .495
#> -----------------------------------------------------------
#> Note: P(T>=T_obs) close to 0 or 1 indicates lack of fit.
#> 
#> .
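For reference, the quantity in the last table is the posterior predictive p-value for the chosen statistic T (here the mean of the simulated residuals),

\[p_{B} = \Pr\left\{ T\left(y^{\operatorname{rep}}\right) \ge T\left(y^{\operatorname{obs}}\right) \mid y^{\operatorname{obs}} \right\}\]

estimated over the MCMC draws of the replicated data; values near 0 or 1 indicate lack of fit, as noted in the output.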

library(lme4)
library(simr)

# Toy model
fm = lmer(y ~ x + (x | g), data = simdata)

# Extend sample size of `g`
fm_extended_g = extend(fm, along = 'g', n = 4)
# 4 levels of g
pwcurve_4g = powerCurve(fm_extended_g, fixed('x'), along = 'g', breaks = 4, 
                        nsim = 50, seed = 123, 
                        # No progress bar
                        progress = FALSE)
# Create a destination object from the power curve above.
all_pwcurve = pwcurve_4g
# Collect the power estimates to be combined (only the 4-level curve here).
all_pwcurve$ps = c(pwcurve_4g$ps[1])

# Collect the corresponding numbers of levels.
all_pwcurve$xval = c(pwcurve_4g$nlevels)

print(all_pwcurve)
#> Power for predictor 'x', (95% confidence interval),
#> by number of levels in g:
#>       4: 44.00% (29.99, 58.75) - 40 rows
#> 
#> Time elapsed: 0 h 0 m 4 s

plot(all_pwcurve, xlab = 'Levels of g')


Julia


using Pkg
Pkg.add("Plots")
using Plots
gr()
#> Plots.GRBackend()
Plots.GRBackend()
#> Plots.GRBackend()
plot(Plots.fakedata(50,5),w=3)
#> Plot{Plots.GRBackend() n=5}
png("sample.png")


Computational Environments


Overall Platforms


sessioninfo::session_info(info = c("platform", "external"))
#> Error in info != "auto" && info != "all": 'length = 2' in coercion to 'logical(1)'

R Packages


sessioninfo::session_info(info = "packages")
#> ═ Session info ═════════════════════════════════════════════════════════════════════════════════════════════════════════════════
#> ─ Packages ─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
#>  package           * version    date (UTC) lib source
#>  abind               1.4-8      2024-09-12 [1] CRAN (R 4.4.1)
#>  Amelia            * 1.8.2      2024-04-23 [1] CRAN (R 4.4.0)
#>  anytime             0.3.9      2020-08-27 [1] CRAN (R 4.4.0)
#>  arm                 1.14-4     2024-04-01 [1] CRAN (R 4.4.0)
#>  arrayhelpers        1.1-0      2020-02-04 [1] CRAN (R 4.4.0)
#>  askpass             1.2.0      2023-09-03 [1] CRAN (R 4.4.0)
#>  astsa             * 2.1        2024-01-10 [1] CRAN (R 4.4.0)
#>  backports           1.5.0      2024-05-23 [1] CRAN (R 4.4.0)
#>  base64enc           0.1-3      2015-07-28 [1] CRAN (R 4.4.0)
#>  bayesforecast     * 1.0.1      2021-06-17 [1] CRAN (R 4.4.0)
#>  bayesplot         * 1.11.1     2024-02-15 [1] CRAN (R 4.4.0)
#>  bcaboot             0.2-3      2021-05-09 [1] CRAN (R 4.4.0)
#>  binom               1.1-1.1    2022-05-02 [1] CRAN (R 4.4.0)
#>  bitops              1.0-8      2024-07-29 [1] CRAN (R 4.4.0)
#>  blogdown          * 1.19.1     2024-08-17 [1] Github (rstudio/blogdown@c6e73fb)
#>  bookdown            0.40       2024-07-02 [1] CRAN (R 4.4.0)
#>  boot              * 1.3-31     2024-08-28 [1] CRAN (R 4.4.1)
#>  bootImpute        * 1.2.1      2023-06-01 [1] CRAN (R 4.4.0)
#>  bridgesampling      1.1-2      2021-04-16 [1] CRAN (R 4.4.0)
#>  brms              * 2.21.0     2024-03-20 [1] CRAN (R 4.4.0)
#>  Brobdingnag         1.2-9      2022-10-19 [1] CRAN (R 4.4.0)
#>  broom             * 1.0.6      2024-05-17 [1] CRAN (R 4.4.0)
#>  broom.mixed       * 0.2.9.5    2024-04-01 [1] CRAN (R 4.4.0)
#>  bslib               0.8.0      2024-07-29 [1] CRAN (R 4.4.0)
#>  cachem              1.1.0      2024-05-16 [1] CRAN (R 4.4.0)
#>  Cairo             * 1.6-2      2023-11-28 [1] CRAN (R 4.4.0)
#>  car               * 3.1-2      2023-03-30 [1] CRAN (R 4.4.0)
#>  carData           * 3.0-5      2022-01-06 [1] CRAN (R 4.4.0)
#>  caTools             1.18.3     2024-09-04 [1] CRAN (R 4.4.1)
#>  checkmate         * 2.3.2      2024-07-29 [1] CRAN (R 4.4.0)
#>  class               7.3-22     2023-05-03 [1] CRAN (R 4.4.0)
#>  cli                 3.6.3      2024-06-21 [1] CRAN (R 4.4.0)
#>  clipr               0.8.0      2022-02-22 [1] CRAN (R 4.4.0)
#>  clock               0.7.1      2024-07-18 [1] CRAN (R 4.4.0)
#>  cluster             2.1.6      2023-12-01 [1] CRAN (R 4.4.0)
#>  coda              * 0.19-4.1   2024-01-31 [1] CRAN (R 4.4.0)
#>  codetools           0.2-20     2024-03-31 [1] CRAN (R 4.4.0)
#>  colorspace        * 2.1-1      2024-07-26 [1] CRAN (R 4.4.0)
#>  colourpicker        1.3.0      2023-08-21 [1] CRAN (R 4.4.0)
#>  concurve          * 2.8.0      2024-07-12 [1] local
#>  cowplot           * 1.1.3      2024-01-22 [1] CRAN (R 4.4.0)
#>  crayon              1.5.3      2024-06-20 [1] CRAN (R 4.4.0)
#>  crosstalk           1.2.1      2023-11-23 [1] CRAN (R 4.4.0)
#>  curl                5.2.2      2024-08-26 [1] CRAN (R 4.4.1)
#>  data.table          1.16.0     2024-08-27 [1] CRAN (R 4.4.1)
#>  DBI                 1.2.3      2024-06-02 [1] CRAN (R 4.4.0)
#>  DEoptimR            1.1-3      2023-10-07 [1] CRAN (R 4.4.0)
#>  desc                1.4.3      2023-12-10 [1] CRAN (R 4.4.0)
#>  details             0.3.0      2022-03-27 [1] CRAN (R 4.4.0)
#>  dials             * 1.3.0      2024-07-30 [1] CRAN (R 4.4.0)
#>  DiceDesign          1.10       2023-12-07 [1] CRAN (R 4.4.0)
#>  digest              0.6.37     2024-08-19 [1] CRAN (R 4.4.1)
#>  distributional      0.4.0      2024-02-07 [1] CRAN (R 4.4.0)
#>  doParallel        * 1.0.17     2022-02-07 [1] CRAN (R 4.4.0)
#>  doRNG               1.8.6      2023-01-16 [1] CRAN (R 4.4.0)
#>  dplyr             * 1.1.4      2023-11-17 [1] CRAN (R 4.4.0)
#>  DT                  0.33       2024-04-04 [1] CRAN (R 4.4.0)
#>  dygraphs            1.1.1.6    2018-07-11 [1] CRAN (R 4.4.0)
#>  e1071               1.7-16     2024-09-16 [1] CRAN (R 4.4.1)
#>  earth             * 5.3.3      2024-02-26 [1] CRAN (R 4.4.0)
#>  ellipsis            0.3.2      2021-04-29 [1] CRAN (R 4.4.0)
#>  emmeans             1.10.4     2024-08-21 [1] CRAN (R 4.4.1)
#>  equatiomatic        0.3.3      2024-05-26 [1] CRAN (R 4.4.0)
#>  estimability        1.5.1      2024-05-12 [1] CRAN (R 4.4.0)
#>  evaluate            0.24.0     2024-06-10 [1] CRAN (R 4.4.0)
#>  extremevalues       2.3.4      2024-02-13 [1] CRAN (R 4.4.0)
#>  fable             * 0.3.4      2024-03-15 [1] CRAN (R 4.4.0)
#>  fabletools        * 0.4.2      2024-04-22 [1] CRAN (R 4.4.0)
#>  fansi               1.0.6      2023-12-08 [1] CRAN (R 4.4.0)
#>  farver              2.1.2      2024-05-13 [1] CRAN (R 4.4.0)
#>  fastmap             1.2.0      2024-05-15 [1] CRAN (R 4.4.0)
#>  flextable           0.9.6      2024-05-05 [1] CRAN (R 4.4.0)
#>  fontBitstreamVera   0.1.1      2017-02-01 [1] CRAN (R 4.4.0)
#>  fontLiberation      0.1.0      2016-10-15 [1] CRAN (R 4.4.0)
#>  fontquiver          0.2.1      2017-02-01 [1] CRAN (R 4.4.0)
#>  forcats           * 1.0.0      2023-01-29 [1] CRAN (R 4.4.0)
#>  foreach           * 1.5.2      2022-02-02 [1] CRAN (R 4.4.0)
#>  forecast          * 8.23.0     2024-06-20 [1] CRAN (R 4.4.0)
#>  forecastHybrid      5.0.19     2020-08-28 [1] CRAN (R 4.4.0)
#>  foreign             0.8-87     2024-06-26 [1] CRAN (R 4.4.0)
#>  formatR             1.14.1     2024-06-27 [1] https://yihui.r-universe.dev (R 4.4.0)
#>  Formula           * 1.2-5      2023-02-24 [1] CRAN (R 4.4.0)
#>  fracdiff            1.5-3      2024-02-01 [1] CRAN (R 4.4.0)
#>  fs                  1.6.4      2024-04-25 [1] CRAN (R 4.4.0)
#>  furrr               0.3.1      2022-08-15 [1] CRAN (R 4.4.0)
#>  future            * 1.34.0     2024-07-29 [1] CRAN (R 4.4.0)
#>  future.apply      * 1.11.2     2024-03-28 [1] CRAN (R 4.4.0)
#>  gamlss            * 5.4-22     2024-03-20 [1] CRAN (R 4.4.0)
#>  gamlss.data       * 6.0-6      2024-03-14 [1] CRAN (R 4.4.0)
#>  gamlss.dist       * 6.1-1      2023-08-23 [1] CRAN (R 4.4.0)
#>  gdtools             0.4.0      2024-08-28 [1] CRAN (R 4.4.1)
#>  generics            0.1.3      2022-07-05 [1] CRAN (R 4.4.0)
#>  ggcorrplot        * 0.1.4.1    2023-09-05 [1] CRAN (R 4.4.0)
#>  ggdist              3.3.2      2024-03-05 [1] CRAN (R 4.4.0)
#>  ggplot2           * 3.5.1      2024-04-23 [1] CRAN (R 4.4.0)
#>  ggpubr              0.6.0      2023-02-10 [1] CRAN (R 4.4.0)
#>  ggsignif            0.6.4      2022-10-13 [1] CRAN (R 4.4.0)
#>  ggtext            * 0.1.2      2022-09-16 [1] CRAN (R 4.4.0)
#>  glmnet            * 4.1-8      2023-08-22 [1] CRAN (R 4.4.0)
#>  globals             0.16.3     2024-03-08 [1] CRAN (R 4.4.0)
#>  glue                1.7.0      2024-01-09 [1] CRAN (R 4.4.0)
#>  gower               1.0.1      2022-12-22 [1] CRAN (R 4.4.0)
#>  GPfit               1.0-8      2019-02-08 [1] CRAN (R 4.4.0)
#>  greybox           * 2.0.2      2024-08-27 [1] CRAN (R 4.4.1)
#>  gridExtra           2.3        2017-09-09 [1] CRAN (R 4.4.0)
#>  gridtext            0.1.5      2022-09-16 [1] CRAN (R 4.4.0)
#>  gtable              0.3.5      2024-04-22 [1] CRAN (R 4.4.0)
#>  gtools              3.9.5      2023-11-20 [1] CRAN (R 4.4.0)
#>  gWidgets2           1.0-9      2022-01-10 [1] CRAN (R 4.4.0)
#>  gWidgets2tcltk      1.0-8      2022-02-16 [1] CRAN (R 4.4.0)
#>  hardhat             1.4.0      2024-06-02 [1] CRAN (R 4.4.0)
#>  here              * 1.0.1      2020-12-13 [1] CRAN (R 4.4.0)
#>  highr               0.11.1     2024-06-25 [1] https://yihui.r-universe.dev (R 4.4.0)
#>  Hmisc             * 5.1-3      2024-05-28 [1] CRAN (R 4.4.0)
#>  hms                 1.1.3      2023-03-21 [1] CRAN (R 4.4.0)
#>  htmlTable           2.4.3      2024-07-21 [1] CRAN (R 4.4.0)
#>  htmltools         * 0.5.8.1    2024-04-04 [1] CRAN (R 4.4.0)
#>  htmlwidgets         1.6.4      2023-12-06 [1] CRAN (R 4.4.0)
#>  httpuv              1.6.15     2024-03-26 [1] CRAN (R 4.4.0)
#>  httr                1.4.7      2023-08-15 [1] CRAN (R 4.4.0)
#>  igraph              2.0.3      2024-03-13 [1] CRAN (R 4.4.0)
#>  ImputeRobust      * 1.3-1      2018-11-30 [1] CRAN (R 4.4.0)
#>  infer             * 1.0.7      2024-03-25 [1] CRAN (R 4.4.0)
#>  inline              0.3.19     2021-05-31 [1] CRAN (R 4.4.0)
#>  insight             0.20.4     2024-09-01 [1] CRAN (R 4.4.1)
#>  ipred               0.9-15     2024-07-18 [1] CRAN (R 4.4.0)
#>  iterators         * 1.0.14     2022-02-05 [1] CRAN (R 4.4.0)
#>  itertools           0.1-3      2014-03-12 [1] CRAN (R 4.4.0)
#>  janitor             2.2.0      2023-02-02 [1] CRAN (R 4.4.0)
#>  jomo                2.7-6      2023-04-15 [1] CRAN (R 4.4.0)
#>  jquerylib           0.1.4      2021-04-26 [1] CRAN (R 4.4.0)
#>  jsonlite            1.8.8      2023-12-04 [1] CRAN (R 4.4.0)
#>  JuliaCall         * 0.17.5     2022-09-08 [1] CRAN (R 4.4.0)
#>  kableExtra        * 1.4.0      2024-01-24 [1] CRAN (R 4.4.0)
#>  katex             * 1.4.1      2024-07-12 [1] https://ropensci.r-universe.dev (R 4.4.0)
#>  keras             * 2.15.0     2024-04-20 [1] CRAN (R 4.4.0)
#>  km.ci               0.5-6      2022-04-06 [1] CRAN (R 4.4.0)
#>  KMsurv              0.1-5      2012-12-03 [1] CRAN (R 4.4.0)
#>  knitr             * 1.48.1     2024-07-12 [1] https://yihui.r-universe.dev (R 4.4.0)
#>  labeling            0.4.3      2023-08-29 [1] CRAN (R 4.4.0)
#>  laeken              0.5.3      2024-01-25 [1] CRAN (R 4.4.0)
#>  later               1.3.2      2023-12-06 [1] CRAN (R 4.4.0)
#>  latex2exp         * 0.9.6      2022-11-28 [1] CRAN (R 4.4.0)
#>  lattice           * 0.22-6     2024-03-20 [1] CRAN (R 4.4.0)
#>  lava                1.8.0      2024-03-05 [1] CRAN (R 4.4.0)
#>  lazyeval            0.2.2      2019-03-15 [1] CRAN (R 4.4.0)
#>  lhs                 1.2.0      2024-06-30 [1] CRAN (R 4.4.0)
#>  lifecycle           1.0.4      2023-11-07 [1] CRAN (R 4.4.0)
#>  listenv             0.9.1      2024-01-29 [1] CRAN (R 4.4.0)
#>  lme4              * 1.1-35.5   2024-07-03 [1] CRAN (R 4.4.0)
#>  lmtest              0.9-40     2022-03-21 [1] CRAN (R 4.4.0)
#>  loo               * 2.8.0      2024-07-03 [1] CRAN (R 4.4.0)
#>  lubridate         * 1.9.3      2023-09-27 [1] CRAN (R 4.4.0)
#>  magick              2.8.4      2024-07-12 [1] https://ropensci.r-universe.dev (R 4.4.0)
#>  magrittr          * 2.0.3      2022-03-30 [1] CRAN (R 4.4.0)
#>  markdown            1.13       2024-06-04 [1] CRAN (R 4.4.0)
#>  MASS              * 7.3-61     2024-06-13 [1] CRAN (R 4.4.0)
#>  mathjaxr            1.6-0      2022-02-28 [1] CRAN (R 4.4.0)
#>  Matrix            * 1.7-0      2024-03-22 [1] CRAN (R 4.4.0)
#>  MatrixModels        0.5-3      2023-11-06 [1] CRAN (R 4.4.0)
#>  matrixStats         1.4.1      2024-09-08 [1] CRAN (R 4.4.1)
#>  mcmc                0.9-8      2023-11-16 [1] CRAN (R 4.4.0)
#>  MCMCpack          * 1.7-1      2024-08-27 [1] CRAN (R 4.4.1)
#>  memoise             2.0.1      2021-11-26 [1] CRAN (R 4.4.0)
#>  metadat             1.2-0      2022-04-06 [1] CRAN (R 4.4.0)
#>  metafor             4.6-0      2024-03-28 [1] CRAN (R 4.4.0)
#>  mgcv              * 1.9-1      2023-12-21 [1] CRAN (R 4.4.0)
#>  mi                * 1.1        2022-06-06 [1] CRAN (R 4.4.0)
#>  mice              * 3.16.0     2023-06-05 [1] CRAN (R 4.4.0)
#>  miceadds          * 3.17-44    2024-01-09 [1] CRAN (R 4.4.0)
#>  miceFast          * 0.8.2      2022-11-17 [1] CRAN (R 4.4.0)
#>  mime                0.12.1     2024-06-26 [1] https://yihui.r-universe.dev (R 4.4.0)
#>  miniUI              0.1.1.1    2018-05-18 [1] CRAN (R 4.4.0)
#>  minqa               1.2.8      2024-08-17 [1] CRAN (R 4.4.0)
#>  missForest        * 1.5        2022-04-14 [1] CRAN (R 4.4.0)
#>  mitml             * 0.4-5      2023-03-08 [1] CRAN (R 4.4.0)
#>  mitools             2.4        2019-04-26 [1] CRAN (R 4.4.0)
#>  modeldata         * 1.4.0      2024-06-19 [1] CRAN (R 4.4.0)
#>  modeltime         * 1.3.0      2024-07-29 [1] CRAN (R 4.4.0)
#>  multcomp            1.4-26     2024-07-18 [1] CRAN (R 4.4.0)
#>  munsell             0.5.1      2024-04-01 [1] CRAN (R 4.4.0)
#>  mvtnorm           * 1.3-1      2024-09-03 [1] CRAN (R 4.4.1)
#>  nlme              * 3.1-166    2024-08-14 [1] CRAN (R 4.4.0)
#>  nloptr              2.1.1      2024-06-25 [1] CRAN (R 4.4.0)
#>  nnet                7.3-19     2023-05-03 [1] CRAN (R 4.4.0)
#>  numDeriv            2016.8-1.1 2019-06-06 [1] CRAN (R 4.4.0)
#>  officer             0.6.6      2024-05-05 [1] CRAN (R 4.4.0)
#>  opdisDownsampling   1.0.1      2024-04-15 [1] CRAN (R 4.4.0)
#>  openssl             2.2.1      2024-08-16 [1] CRAN (R 4.4.0)
#>  pan                 1.9        2023-12-07 [1] CRAN (R 4.4.0)
#>  pander              0.6.5      2022-03-18 [1] CRAN (R 4.4.0)
#>  parallelly        * 1.38.0     2024-07-27 [1] CRAN (R 4.4.0)
#>  parsnip           * 1.2.1      2024-03-22 [1] CRAN (R 4.4.0)
#>  pbkrtest            0.5.3      2024-06-26 [1] CRAN (R 4.4.0)
#>  pbmcapply         * 1.5.1      2022-04-28 [1] CRAN (R 4.4.0)
#>  performance       * 0.12.3     2024-09-02 [1] CRAN (R 4.4.1)
#>  pillar              1.9.0      2023-03-22 [1] CRAN (R 4.4.0)
#>  pkgbuild            1.4.4      2024-03-17 [1] CRAN (R 4.4.0)
#>  pkgconfig           2.0.3      2019-09-22 [1] CRAN (R 4.4.0)
#>  plotly            * 4.10.4     2024-01-13 [1] CRAN (R 4.4.0)
#>  plotmo            * 3.6.4      2024-08-31 [1] CRAN (R 4.4.1)
#>  plotrix           * 3.8-4      2023-11-10 [1] CRAN (R 4.4.0)
#>  plyr                1.8.9      2023-10-02 [1] CRAN (R 4.4.0)
#>  png                 0.1-8      2022-11-29 [1] CRAN (R 4.4.0)
#>  polspline           1.1.25     2024-05-10 [1] CRAN (R 4.4.0)
#>  posterior         * 1.6.0      2024-07-03 [1] CRAN (R 4.4.0)
#>  pracma              2.4.4      2023-11-10 [1] CRAN (R 4.4.0)
#>  prettyunits         1.2.0      2023-09-24 [1] CRAN (R 4.4.0)
#>  prodlim             2024.06.25 2024-06-24 [1] CRAN (R 4.4.0)
#>  ProfileLikelihood * 1.3        2023-08-25 [1] CRAN (R 4.4.0)
#>  progress          * 1.2.3      2023-12-06 [1] CRAN (R 4.4.0)
#>  promises            1.3.0      2024-04-05 [1] CRAN (R 4.4.0)
#>  prophet           * 1.0        2021-03-30 [1] CRAN (R 4.4.0)
#>  proxy               0.4-27     2022-06-09 [1] CRAN (R 4.4.0)
#>  pryr                0.1.6      2023-01-17 [1] CRAN (R 4.4.0)
#>  purrr             * 1.0.2      2023-08-10 [1] CRAN (R 4.4.0)
#>  qqconf              1.3.2      2023-04-14 [1] CRAN (R 4.4.0)
#>  qqplotr           * 0.0.6      2023-01-25 [1] CRAN (R 4.4.0)
#>  quadprog            1.5-8      2019-11-20 [1] CRAN (R 4.4.0)
#>  quantmod            0.4.26     2024-02-14 [1] CRAN (R 4.4.0)
#>  quantreg          * 5.98       2024-05-26 [1] CRAN (R 4.4.0)
#>  QuickJSR            1.3.1      2024-07-14 [1] CRAN (R 4.4.0)
#>  R6                  2.5.1      2021-08-19 [1] CRAN (R 4.4.0)
#>  ragg                1.3.3      2024-09-11 [1] CRAN (R 4.4.1)
#>  randomForest      * 4.7-1.1    2022-05-23 [1] CRAN (R 4.4.0)
#>  ranger              0.16.0     2023-11-12 [1] CRAN (R 4.4.0)
#>  rappdirs            0.3.3      2021-01-31 [1] CRAN (R 4.4.0)
#>  rapportools         1.1        2022-03-22 [1] CRAN (R 4.4.0)
#>  Rcpp              * 1.0.13     2024-07-17 [1] CRAN (R 4.4.0)
#>  RcppParallel        5.1.9      2024-08-19 [1] CRAN (R 4.4.1)
#>  readr             * 2.1.5      2024-01-10 [1] CRAN (R 4.4.0)
#>  recipes           * 1.1.0      2024-07-04 [1] CRAN (R 4.4.0)
#>  rematch2            2.1.2      2020-05-01 [1] CRAN (R 4.4.0)
#>  reshape2          * 1.4.4      2020-04-09 [1] CRAN (R 4.4.0)
#>  reticulate        * 1.39.0     2024-09-05 [1] CRAN (R 4.4.1)
#>  rlang             * 1.1.4      2024-06-04 [1] CRAN (R 4.4.0)
#>  RLRsim              3.1-8      2022-03-16 [1] CRAN (R 4.4.0)
#>  rmarkdown         * 2.28       2024-08-17 [1] CRAN (R 4.4.0)
#>  rms               * 6.8-2      2024-08-23 [1] CRAN (R 4.4.1)
#>  rngtools            1.5.2      2021-09-20 [1] CRAN (R 4.4.0)
#>  robustbase          0.99-4     2024-08-19 [1] CRAN (R 4.4.1)
#>  rpart               4.1.23     2023-12-05 [1] CRAN (R 4.4.0)
#>  rprojroot           2.0.4      2023-11-05 [1] CRAN (R 4.4.0)
#>  rsample           * 1.2.1      2024-03-25 [1] CRAN (R 4.4.0)
#>  rstan             * 2.32.6     2024-03-05 [1] CRAN (R 4.4.0)
#>  rstanarm          * 2.32.1     2024-01-18 [1] CRAN (R 4.4.0)
#>  rstantools          2.4.0      2024-01-31 [1] CRAN (R 4.4.0)
#>  rstatix             0.7.2      2023-02-01 [1] CRAN (R 4.4.0)
#>  rstudioapi          0.16.0     2024-03-24 [1] CRAN (R 4.4.0)
#>  sandwich            3.1-1      2024-09-15 [1] CRAN (R 4.4.1)
#>  sass                0.4.9      2024-03-15 [1] CRAN (R 4.4.0)
#>  scales            * 1.3.0      2023-11-28 [1] CRAN (R 4.4.0)
#>  sessioninfo         1.2.2      2021-12-06 [1] CRAN (R 4.4.0)
#>  shape               1.4.6.1    2024-02-23 [1] CRAN (R 4.4.0)
#>  shiny               1.9.1      2024-08-01 [1] CRAN (R 4.4.0)
#>  shinyjs             2.1.0      2021-12-23 [1] CRAN (R 4.4.0)
#>  shinystan           2.6.0      2022-03-03 [1] CRAN (R 4.4.0)
#>  shinythemes         1.2.0      2021-01-25 [1] CRAN (R 4.4.0)
#>  showtext          * 0.9-7      2024-03-02 [1] CRAN (R 4.4.0)
#>  showtextdb        * 3.0        2020-06-04 [1] CRAN (R 4.4.0)
#>  simr              * 1.0.7      2023-04-13 [1] CRAN (R 4.4.0)
#>  smooth            * 4.0.2      2024-06-19 [1] CRAN (R 4.4.0)
#>  snakecase           0.11.1     2023-08-27 [1] CRAN (R 4.4.0)
#>  sp                  2.1-4      2024-04-30 [1] CRAN (R 4.4.0)
#>  SparseM           * 1.84-2     2024-07-17 [1] CRAN (R 4.4.0)
#>  StanHeaders       * 2.32.10    2024-07-15 [1] CRAN (R 4.4.0)
#>  Statamarkdown     * 0.9.2      2023-12-04 [1] CRAN (R 4.4.0)
#>  statmod             1.5.0      2023-01-06 [1] CRAN (R 4.4.0)
#>  stringi             1.8.4      2024-05-06 [1] CRAN (R 4.4.0)
#>  stringr           * 1.5.1      2023-11-14 [1] CRAN (R 4.4.0)
#>  summarytools      * 1.0.1      2022-05-20 [1] CRAN (R 4.4.0)
#>  survival            3.7-0      2024-06-05 [1] CRAN (R 4.4.0)
#>  survminer           0.4.9      2021-03-09 [1] CRAN (R 4.4.0)
#>  survMisc            0.5.6      2022-04-07 [1] CRAN (R 4.4.0)
#>  svglite           * 2.1.3      2023-12-08 [1] CRAN (R 4.4.0)
#>  svgPanZoom          0.3.4      2020-02-15 [1] CRAN (R 4.4.0)
#>  svUnit              1.0.6      2021-04-19 [1] CRAN (R 4.4.0)
#>  sysfonts          * 0.8.9      2024-03-02 [1] CRAN (R 4.4.0)
#>  systemfonts         1.1.0      2024-05-15 [1] CRAN (R 4.4.0)
#>  tensorA             0.36.2.1   2023-12-13 [1] CRAN (R 4.4.0)
#>  tensorflow        * 2.16.0     2024-04-15 [1] CRAN (R 4.4.0)
#>  texPreview        * 2.1.0      2024-01-24 [1] CRAN (R 4.4.0)
#>  texreg              1.39.4     2024-07-24 [1] CRAN (R 4.4.0)
#>  textshaping         0.4.0      2024-05-24 [1] CRAN (R 4.4.0)
#>  tfautograph       * 0.3.2      2021-09-17 [1] CRAN (R 4.4.0)
#>  tfdatasets        * 2.17.0     2024-07-16 [1] CRAN (R 4.4.0)
#>  tfruns              1.5.3      2024-04-19 [1] CRAN (R 4.4.0)
#>  TH.data             1.1-2      2023-04-17 [1] CRAN (R 4.4.0)
#>  thief               0.3        2018-01-24 [1] CRAN (R 4.4.0)
#>  threejs             0.3.3      2020-01-21 [1] CRAN (R 4.4.0)
#>  tibble            * 3.2.1      2023-03-20 [1] CRAN (R 4.4.0)
#>  tidybayes         * 3.0.7      2024-09-15 [1] CRAN (R 4.4.1)
#>  tidymodels        * 1.2.0      2024-03-25 [1] CRAN (R 4.4.0)
#>  tidyr             * 1.3.1      2024-01-24 [1] CRAN (R 4.4.0)
#>  tidyselect          1.2.1      2024-03-11 [1] CRAN (R 4.4.0)
#>  tidyverse         * 2.0.0      2023-02-22 [1] CRAN (R 4.4.0)
#>  timechange          0.3.0      2024-01-18 [1] CRAN (R 4.4.0)
#>  timeDate            4032.109   2023-12-14 [1] CRAN (R 4.4.0)
#>  timetk            * 2.9.0      2023-10-31 [1] CRAN (R 4.4.0)
#>  tinytex           * 0.53       2024-09-15 [1] CRAN (R 4.4.1)
#>  tseries             0.10-57    2024-08-13 [1] CRAN (R 4.4.0)
#>  tsibble           * 1.1.5      2024-06-27 [1] CRAN (R 4.4.0)
#>  tsibbledata       * 0.4.1      2022-09-01 [1] CRAN (R 4.4.0)
#>  TSstudio          * 0.1.7      2023-08-09 [1] CRAN (R 4.4.0)
#>  TTR                 0.24.4     2023-11-28 [1] CRAN (R 4.4.0)
#>  tune              * 1.2.1      2024-04-18 [1] CRAN (R 4.4.0)
#>  twosamples          2.0.1      2023-06-23 [1] CRAN (R 4.4.0)
#>  tzdb                0.4.0      2023-05-12 [1] CRAN (R 4.4.0)
#>  urca                1.3-4      2024-05-27 [1] CRAN (R 4.4.0)
#>  utf8                1.2.4      2023-10-22 [1] CRAN (R 4.4.0)
#>  uuid                1.2-1      2024-07-29 [1] CRAN (R 4.4.0)
#>  V8                  5.0.0      2024-08-16 [1] CRAN (R 4.4.0)
#>  vcd                 1.4-13     2024-09-16 [1] CRAN (R 4.4.1)
#>  vctrs               0.6.5      2023-12-01 [1] CRAN (R 4.4.0)
#>  VIM               * 6.2.2      2022-08-25 [1] CRAN (R 4.4.0)
#>  viridisLite         0.4.2      2023-05-02 [1] CRAN (R 4.4.0)
#>  wesanderson       * 0.3.7      2023-10-31 [1] CRAN (R 4.4.0)
#>  whisker             0.4.1      2022-12-05 [1] CRAN (R 4.4.0)
#>  withr               3.0.1      2024-07-31 [1] CRAN (R 4.4.0)
#>  workflows         * 1.1.4      2024-02-19 [1] CRAN (R 4.4.0)
#>  workflowsets      * 1.1.0      2024-03-21 [1] CRAN (R 4.4.0)
#>  xfun              * 0.47       2024-08-17 [1] CRAN (R 4.4.0)
#>  xgboost           * 1.7.8.1    2024-07-24 [1] CRAN (R 4.4.0)
#>  xml2                1.3.6      2023-12-04 [1] CRAN (R 4.4.0)
#>  xtable              1.8-4      2019-04-21 [1] CRAN (R 4.4.0)
#>  xts                 0.14.0     2024-06-05 [1] CRAN (R 4.4.0)
#>  yaml                2.3.10     2024-07-26 [1] CRAN (R 4.4.0)
#>  yardstick         * 1.3.1      2024-03-21 [1] CRAN (R 4.4.0)
#>  zeallot             0.1.0      2018-01-28 [1] CRAN (R 4.4.0)
#>  zip                 2.3.1      2024-01-27 [1] CRAN (R 4.4.0)
#>  zoo                 1.8-12     2023-04-13 [1] CRAN (R 4.4.0)
#> 
#>  [1] /Users/zad/Library/R/arm64/4.4/library
#>  [2] /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/library
#> 
#> ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

Stata Environment


display %tcMonth_dd,CCYY_hh:MM_am now()
about
#>  September 16,2024 9:42 pm
#> 
#> 
#> Stata/BE 17.0 for Mac (Apple Silicon)
#> Revision 21 May 2024
#> Copyright 1985-2021 StataCorp LLC
#> 
#> Total physical memory: 24.00 GB
#> 
#> Stata license: Single-user  perpetual
#> Serial number: 301706317553
#>   Licensed to: Zad Rafi
#>                AMG
