@article{TQMP13-2-127,
  author    = {Lang, Albert-Georg},
  title     = {Is intermediately inspecting statistical data necessarily a bad research practice?},
  journal   = {The Quantitative Methods for Psychology},
  publisher = {TQMP},
  year      = {2017},
  volume    = {13},
  number    = {2},
  pages     = {127--140},
  doi       = {10.20982/tqmp.13.2.p127},
  url       = {http://www.tqmp.org/RegularArticles/vol13-2/p127/p127.pdf},
  abstract  = {Intermediately inspecting the statistical data of a running experiment is justifiably referred to as a bad research practice. With only a few intermediate inspections, Type I error rates inflate to a multiple of the previously defined critical alpha. On the other hand, there are research areas where intermediately inspecting data is extremely desirable if not even necessary. For this reason, in medical research, mathematical methods are known as ``group-sequential testing'' which compensate Type I error cumulation by adjusting critical alpha. In the field of psychological research, these methods are widely unknown or at least used very rarely. One reason may be that group-sequential tests focus on test statistics based on the normal distribution, mainly the t-test, while in psychological research often more complex experimental designs are used. The computer program APriot has been developed to enable the user to conduct Monte-Carlo simulations of what happens when intermediately inspecting the data of an ANOVA. The simulations show clearly how bad a research practice intermediately inspecting data (without adjusting alpha) is. Further, it is shown that in many cases adjusted values of alpha can be found by simulations such that the ANOVA can be used together with group-sequential testing similarly as the t-test. A last set of demonstrations shows how the power and the required number of participants of a group-sequential test can be estimated and that group-sequential testing can be favorable from an economic point of view.},
}