poisson-hurdle.stan (forked from jgabry/stancon2018_intro)
/* Stan program
 * for a Poisson "hurdle" model (also with upper truncation)
 *
 * Note: for simplicity I assume that there is only one way to obtain
 * a zero, as opposed to some zero-inflated models where there are
 * multiple processes that lead to a zero. So in this example, y is
 * zero with probability theta, and y is modeled as a truncated Poisson
 * if greater than zero.
 */
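
/* Implied per-observation likelihood (derived from the model block below),
 * with U = max(y) defined in transformed data:
 *   Pr(y = 0) = theta
 *   Pr(y = k) = (1 - theta) * Poisson(k | lambda) / Pr(1 <= Y <= U),  k = 1, ..., U
 * where Y ~ Poisson(lambda), so the denominator equals
 * poisson_cdf(U | lambda) - poisson_cdf(0 | lambda).
 */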
data {
  int<lower=1> N;
  int<lower=0> y[N];
}
transformed data {
  int U = max(y);  // upper truncation point
}
parameters {
  real<lower=0, upper=1> theta;  // Pr(y = 0)
  real<lower=0> lambda;          // Poisson rate parameter (if y > 0)
}
model {
  lambda ~ exponential(0.2);
  for (n in 1:N) {
    if (y[n] == 0) {
      target += log(theta);    // log(Pr(y = 0))
    } else {
      target += log1m(theta);  // log(Pr(y > 0))
      y[n] ~ poisson(lambda) T[1, U];  // truncated Poisson
    }
  }
}
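
/* Note: the truncation T[1, U] above renormalizes the Poisson pmf to the
 * interval [1, U]; the log_lik computation in generated quantities writes the
 * corresponding normalizing constant out explicitly via log_diff_exp().
 */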
generated quantities {
  int y_rep[N];     // draws from the posterior predictive distribution
  real log_lik[N];  // pointwise log-likelihood contributions
  for (n in 1:N) {
    // Draw from the posterior predictive distribution
    if (bernoulli_rng(theta)) {
      y_rep[n] = 0;
    } else {
      // Rejection sampling: redraw until the value lands in [1, U]
      // to obtain a draw from the truncated Poisson
      int w;  // temporary variable
      w = poisson_rng(lambda);
      while (w == 0 || w > U) {
        w = poisson_rng(lambda);
      }
      y_rep[n] = w;
    }
    // Compute and store the pointwise log-likelihood
    // (used in the last section of the R markdown document,
    // which deals with predictive performance)
    if (y[n] == 0) {
      log_lik[n] = log(theta);
    } else {
      log_lik[n] =
        log1m(theta)
        + poisson_lpmf(y[n] | lambda)
        - log_diff_exp(poisson_lcdf(U | lambda),
                       poisson_lcdf(0 | lambda));  // log(Pr(1 <= Y <= U))
    }
  }
}