DEFINE !BINCINT (N = !TOKENS(1) / X = !TOKENS(1) / CONF = !TOKENS(1) ).
*.
* .
* Given data from a binomial experiment with X = number of
* successes and N = number of trials, this macro returns the
* exact (Clopper-Pearson, not asymptotic) upper and lower
* confidence limits for the binomial proportion .
* .
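* For reference, with alpha = 1 - CONF/100, the lower limit pL is
* defined by  Pr(X >= x | N, pL) = alpha/2  and the upper limit pU
* by  Pr(X <= x | N, pU) = alpha/2 ; each equation is solved
* numerically below .
* .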
title 'Exact Confidence Limits for a Binomial Parameter'.
* .
* Sample Usage:  !BINCINT N=10 X=3 CONF=95.0 .
*.
*   Mike Lacy, Dec 1996.
* Create a dummy case for SPSS to work with .
new file .
input program.
compute dummy = 1 .
end case.
end file.
end input program.
*  .
***********************************************.
* Constants, user modifiable .
compute toler = 1.0e-4.              /* desired relative precision . 
compute maxiter = 40.                /* Maximum iterations allowed below .
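* (Iteration stops when abs(p - alpha2) <= toler * alpha2, or after
* maxiter secant/bisection steps; see the END LOOP condition below.)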
* .
string lconverg (a3) uconverg (a3) . /* flags to indicate convergence
* .
* Pick up the macro parameters and compute with them as necessary .
compute N = !N .
compute x = !X .
compute alpha2 =  (1 - (!CONF/100))/2 .
* Some minor computations.
compute pyes = x/n.      
compute se = sqrt(pyes * (1-pyes)/N).
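* (pyes and se are the usual asymptotic point estimate and standard
* error; below they serve only to construct starting guesses.)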
*.
*.
*******************************************.
*  Find the limits: the first pass (dolow = 1) finds the lower limit
*  and the second pass (dolow = 0) finds the upper limit .
* .
loop dolow = 1 to 0  by -1.
do if dolow .
* Initialize for lower limit .
+  compute lowbound = 0.0 . /*anything too low will work
+  compute upbound  = pyes . /*anything too high will work
*  Generate two guesses for the lower limit.  
+  compute OldGuess = pyes.
+  compute guess = pyes + probit(alpha2) * se. /* probit(alpha2) < 0: Wald lower limit .
+  if (guess >= upbound) or (guess <= lowbound) 
      guess = (lowbound + upbound)/2.0.
*   Determine probabilities for initial guesses.
+   compute p = 1 - cdf.binom(x-1,n,guess).
+   compute OldP = 1- cdf.binom(x-1,n,OldGuess).
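*   (1 - CDF.BINOM(x-1,N,guess) is Pr(X >= x), the tail probability
*   that defines the lower limit.)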
+   compute lconverg = 'no'.
*.
else.  /* Upper Limit .
*.
*  Initialize for upper limit .
+  compute lowbound = pyes . /*anything too low will work
+  compute upbound  = 1.0 . /*anything too high will work
*  Generate two guesses for the upper limit.  
+  compute OldGuess = pyes.
+  compute guess = pyes + probit(1-alpha2) * se.
+  if (guess >= upbound) or (guess <= lowbound) 
      guess = (lowbound + upbound)/2.0.
*  Determine probabilities for initial guesses.
+  compute p = cdf.binom(x,n,guess).
+  compute OldP = cdf.binom(x,n,OldGuess).
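*  (CDF.BINOM(x,N,guess) is Pr(X <= x), the tail probability that
*  defines the upper limit.)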
+  compute uconverg = 'no'.
* .
end if.   /* End initialization .
*.
*.
* Go ahead and find the limit, checking first for extreme cases.
*.
do if (x = 0) and dolow .      /* Observed X at lower limit, nothing to do
+  compute lcl = 0.0.
+  compute lconverg = 'yes'.
else if (x=N) and not dolow.   /* Observed X at upper limit, nothing to do            
+  compute ucl = 1.0 .
+  compute uconverg = 'yes' . 
else .
*  Use iteration to find the limit.  The technique is the secant
*  method (which uses an empirically estimated derivative),
*  supplemented with bisection .
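*  (The update used below is newguess = guess - diff/slope, with
*  slope = (OldP - P)/(OldGuess - Guess); when the slope is
*  degenerate or the update falls outside (lowbound, upbound),
*  the bracket midpoint is used instead.)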
*. 
+  compute numiter = 0 .
+  compute diff = p - alpha2 .
*.
*          ITERATION LOOP .
+  loop .  
+    compute numiter = numiter + 1 .
*    Reset bounds on iteration: different for upper and lower limit .
+    do if (diff < 0) and dolow .  
+      compute lowbound = guess.
+    else if (diff <0) and not dolow. 
+      compute upbound = guess .
+    else if (diff >= 0) and dolow .
+      compute upbound = guess .
+    else if (diff >= 0) and not dolow .
+      compute lowbound = guess .
+    end if .
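*    (For the lower limit, p = Pr(X >= x) increases with guess, so
*    diff < 0 means guess is still below the limit; for the upper
*    limit, p = Pr(X <= x) decreases with guess, so the roles reverse.)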
* .
*    Construct next guess and probability, while protecting 
*    against bad guesses and wild secant slopes .
+    compute slope = (OldP - P)/(OldGuess - Guess) .
+    compute OldP = P .
+    compute OldGuess = Guess .
+    do if ((Abs(slope) < 1e-5 ) or (Abs(slope) > 1e+5)) .
+      compute guess = (upbound + lowbound)/2.0 .
+    else .
+      compute guess = guess - diff/slope .
+      if (guess >= upbound) or (guess <= lowbound) 
         guess = (lowbound + upbound)/2.0.
+    end if .
+    do if dolow.
+      compute p = 1- cdf.Binom(x-1,n,Guess) .
+    else if not dolow .
+      compute p = cdf.Binom(x,n,Guess) .
+    end if.
+    compute diff = p - alpha2 .
*.
+  end loop if ( (abs(diff) <= toler * alpha2)
         or    (numiter > maxiter) ) .
* .
end if . /* if had to find the limit by iteration .
*.
do if dolow .
+   compute lcl = guess .
+   compute l_iter = numiter .
+   if (abs(diff) <= toler * alpha2) lconverg = 'yes'.
else if not dolow .
+   compute ucl = guess .
+   compute u_iter = numiter .
+   if (abs(diff) <= toler * alpha2) uconverg = 'yes'.
end if . /* if dolow .
end loop . /* loop dolow = 1 to 0 for lower and upper limit .
*.
format pyes (f7.5) ucl (f7.5) lcl (f7.5)  
       x (f6.0)  n (f6.0)  l_iter (f6.0) u_iter (f6.0).
list x n lcl pyes ucl l_iter lconverg u_iter uconverg.
!ENDDEFINE .
* .
!BINCINT N=10 X=3 CONF=95.0 .
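* For this sample run (X = 3, N = 10, CONF = 95.0) the exact limits
* are approximately lcl = .0667 and ucl = .6525, with pyes = .30000,
* and both convergence flags should read 'yes' .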
* .
* X = observed # of "Yes" . 
* N = number of trials .
* lcl = lower confidence limit .
* pyes = observed proportion of "Yes" .
* ucl = upper confidence limit .
* l_iter = number of iterations used to find the lower limit .
* lconverg = did the iteration for the lower limit converge? .
* u_iter = number of iterations used to find the upper limit .
* uconverg = did the iteration for the upper limit converge? .
* .
* .
exe .