@article {660400, title = {Differentially Private Survey Research}, journal = {American Journal of Political Science}, year = {Forthcoming}, abstract = {Survey researchers have long sought to protect the privacy of their respondents via de-identification (removing names and other directly identifying information) before sharing data. Although these procedures can help, recent research demonstrates that they fail to protect respondents from intentional re-identification attacks, a problem that threatens to undermine vast survey enterprises in academia, government, and industry. This is especially a problem in political science because political beliefs are not merely the subject of our scholarship; they represent some of the most important information respondents want to keep private. We confirm the problem in practice by re-identifying individuals from a survey about a controversial referendum declaring life beginning at conception. We build on the concept of "differential privacy" to offer new data sharing procedures with mathematical guarantees for protecting respondent privacy and statistical validity guarantees for social scientists analyzing differentially private data. \ The cost of these new procedures is larger standard errors, which can be overcome with somewhat larger sample sizes.}, author = {Georgina Evans and Gary King and Adam D. Smith and Abhradeep Thakurta} } @article {677821, title = {The Essential Role of Statistical Inference in Evaluating Electoral Systems: A Response to DeFord et al.}, journal = {Political Analysis}, year = {Forthcoming}, month = {2023}, abstract = {Katz, King, and Rosenblatt (2020) introduces a theoretical framework for understanding redistricting and electoral systems, built on basic statistical and social science principles of inference. DeFord et al. (Forthcoming, 2021) instead focuses solely on descriptive measures, which lead to the problems identified in our article. In this paper, we illustrate the essential role of these basic principles and then offer statistical, mathematical, and substantive corrections required to apply DeFord et al.{\textquoteright}s calculations to social science questions of interest, while also showing how to easily resolve all claimed paradoxes and problems. We are grateful to the authors for their interest in our work and for this opportunity to clarify these principles and our theoretical framework.\ }, author = {Jonathan Katz and Gary King and Elizabeth Rosenblatt} } @article {643747, title = {Statistically Valid Inferences from Privacy Protected Data}, journal = {American Political Science Review}, year = {Forthcoming}, abstract = {Unprecedented quantities of data that could help social scientists understand and ameliorate the challenges of human society are presently locked away inside companies, governments, and other organizations, in part because of privacy concerns. We address this problem with a general-purpose data access and analysis system with mathematical guarantees of privacy for research subjects, and statistical validity guarantees for researchers seeking social science insights. We build on the standard of {\textquoteleft}{\textquoteleft}differential privacy,{\textquoteright}{\textquoteright} correct for biases induced by the privacy-preserving procedures, provide a proper accounting of uncertainty, and impose minimal constraints on the choice of statistical methods and quantities estimated.
We also replicate two recent published articles and show how we can obtain approximately the same substantive results while simultaneously protecting privacy. Our approach is simple to use and computationally efficient; we also offer open source software that implements all our methods.}, url = {https://www.doi.org/10.1017/S0003055422001411}, author = {Georgina Evans and Gary King and Margaret Schwenzfeier and Abhradeep Thakurta} } @article {701356, title = {A simulation-based comparative effectiveness analysis of policies to improve global maternal health outcomes}, journal = {Nature Medicine}, year = {2023}, abstract = {The Sustainable Development Goals include a target to reduce the global maternal mortality ratio (MMR) to less than 70 maternal deaths per 100,000 live births by 2030, with no individual country exceeding 140. However, on current trends the goals are unlikely to be met. We used the empirically calibrated Global Maternal Health microsimulation model, which simulates individual women in 200 countries and territories, to evaluate the impact of different interventions and strategies from 2022 to 2030. Although individual interventions yielded fairly small reductions in maternal mortality, integrated strategies were more effective. A strategy to simultaneously increase facility births, improve the availability of clinical services and quality of care at facilities, and improve linkages to care would yield a projected global MMR of 72 (95\% uncertainty interval (UI) = 58{\textendash}87) in 2030. A comprehensive strategy adding family planning and community-based interventions would have an even larger impact, with a projected MMR of 58 (95\% UI = 46{\textendash}70). Although integrated strategies consisting of multiple interventions will probably be needed to achieve substantial reductions in maternal mortality, the relative priority of different interventions varies by setting. Our regional and country-level estimates can help guide priority setting in specific contexts to accelerate improvements in maternal health.}, url = {https://www.nature.com/articles/s41591-023-02311-w}, author = {Ward, Zachary J. and Rifat Atun and Gary King and Sequeira Dmello, Brenda and Goldie, Sue J.} } @article {701351, title = {Simulation-based estimates and projections of global, regional and country-level maternal mortality by cause, 1990{\textendash}2050}, journal = {Nature Medicine}, year = {2023}, abstract = {Maternal mortality is a major global health challenge. Although progress has been made globally in reducing maternal deaths, measurement remains challenging given the many causes and frequent underreporting of maternal deaths. We developed the Global Maternal Health microsimulation model for women in 200 countries and territories, accounting for individual fertility preferences and clinical histories. Demographic, epidemiologic, clinical and health system data were synthesized from multiple sources, including the medical literature, Civil Registration Vital Statistics systems and Demographic and Health Survey data. We calibrated the model to empirical data from 1990 to 2015 and assessed the predictive accuracy of our model using indicators from 2016 to 2020.
We projected maternal health indicators from 1990 to 2050 for each country and estimate that between 1990 and 2020 annual global maternal deaths declined by over 40\% from 587,500 (95\% uncertainty intervals (UI) 520,600{\textendash}714,000) to 337,600 (95\% UI 307,900{\textendash}364,100), and are projected to decrease to 327,400 (95\% UI 287,800{\textendash}360,700) in 2030 and 320,200 (95\% UI 267,100{\textendash}374,600) in 2050. The global maternal mortality ratio is projected to decline to 167 (95\% UI 142{\textendash}188) in 2030, with 58 countries above 140, suggesting that on current trends, maternal mortality Sustainable Development Goal targets are unlikely to be met. Building on the development of our structural model, future research can identify context-specific policy interventions that could allow countries to accelerate reductions in maternal deaths.}, url = {https://www.nature.com/articles/s41591-023-02310-x}, author = {Ward, Zachary J. and Rifat Atun and Gary King and Sequeira Dmello, Brenda and Goldie, Sue J.} } @article {646717, title = {Statistically Valid Inferences from Differentially Private Data Releases, with Application to the Facebook URLs Dataset}, journal = {Political Analysis}, volume = {31}, number = {1}, year = {2023}, month = {2021}, pages = {1-21}, abstract = { We offer methods to analyze the "differentially private"\ Facebook URLs Dataset\ which, at over 40 trillion cell values, is one of the largest social science research datasets ever constructed. The version of differential privacy used in the URLs dataset has specially calibrated random noise added, which provides mathematical guarantees for the privacy of individual research subjects while still making it possible to learn about aggregate patterns of interest to social scientists. Unfortunately, random noise creates measurement error which induces statistical bias --\ including attenuation, exaggeration, switched signs, or incorrect uncertainty estimates. We adapt methods developed to correct for naturally occurring measurement error, with special attention to computational efficiency for large datasets. The result is statistically valid linear regression estimates and descriptive statistics that can be interpreted as ordinary analyses of non-confidential data but with appropriately larger standard errors. We have implemented these methods in open source software for R called PrivacyUnbiased.\  Facebook has ported PrivacyUnbiased\ to open source Python code called\ svinfer.\  We have extended these results in Evans and King (2021). }, url = {https://www.doi.org/10.1017/pan.2022.1}, author = {Georgina Evans and Gary King} } @article {603580, title = {An Improved Method of Automated Nonparametric Content Analysis for Social Science}, journal = {Political Analysis}, volume = {31}, year = {2022}, pages = {42-58}, abstract = { Some scholars build models to classify documents into chosen categories. Others, especially social scientists who tend to focus on population characteristics, instead usually estimate the proportion of documents in each category -- using either parametric "classify-and-count"\ methods or "direct"\ nonparametric estimation of proportions without individual classification. Unfortunately, classify-and-count methods can be highly model dependent or generate more bias in the proportions even as the percent of documents correctly classified increases. 
Direct estimation avoids these problems, but can suffer when the meaning of language changes between training and test sets or is too similar across categories. We develop an improved direct estimation approach without these issues by including and optimizing continuous text features, along with a form of matching adapted from the causal inference literature. Our approach substantially improves performance in a diverse collection of 73\ data sets. We also offer easy-to-use software that implements all ideas discussed herein. }, author = {Connor T. Jerzak and Gary King and Anton Strezhnev} } @article {680770, title = {Rejoinder: Concluding Remarks on Scholarly Communications}, journal = {Political Analysis}, year = {2022}, abstract = { We are grateful to DeFord et al. for the continued attention to our work and the crucial issues of fair representation in democratic electoral systems. Our response (Katz, King, and Rosenblatt, forthcoming) was designed to help readers avoid being misled by mistaken claims in DeFord et al. (forthcoming-a), and does not address other literature or uses of our prior work. As it happens, none of our corrections were addressed (or contradicted) in the most recent submission (DeFord et al., forthcoming-b). We also offer a recommendation regarding DeFord et al.{\textquoteright}s (forthcoming-b) concern with how expert witnesses, consultants, and commentators should present academic scholarship to academic novices, such as judges, public officials, the media, and the general public. In these public service roles, scholars attempt to translate academic understanding of sophisticated scholarly literatures, technical methodologies, and complex theories for those without sufficient background in social science or statistics.\  }, author = {Jonathan Katz and Gary King and Elizabeth Rosenblatt} } @article {543936, title = {How to Measure Legislative District Compactness If You Only Know it When You See It}, journal = {American Journal of Political Science}, volume = {65}, number = {3}, year = {2021}, month = {July, 2021}, pages = {533-550}, abstract = { To deter gerrymandering, many state constitutions require legislative districts to be "compact."\ Yet, the law offers few precise definitions other than "you know it when you see it,"\ which effectively implies a common understanding of the concept. In contrast, academics have shown that compactness has multiple dimensions and have generated many conflicting measures. We hypothesize that both are correct -- that compactness is complex and multidimensional, but a common understanding exists across people. We develop a survey to elicit this understanding, with high reliability (in data where the standard paired comparisons approach fails). We create a statistical model that predicts, with high accuracy, solely from the geometric features of the district, compactness evaluations by judges and public officials responsible for redistricting, among others. We also offer compactness data from our validated measure for 20,160 state legislative and congressional districts, as well as open source software to compute this measure from any district. Winner of the 2018 Robert H Durr Award from the MPSA.
}, url = {http://doi.org/10.1111/ajps.12603}, author = {Aaron Kaufman and Gary King and Mayya Komisarchik} } @article {673969, title = {Precision mapping child undernutrition for nearly 600,000 inhabited census villages in India}, journal = {Proceedings of the National Academy of Sciences}, volume = {118}, number = {18}, year = {2021}, pages = {1-11}, abstract = {There are emerging opportunities to assess health indicators at truly small areas with increasing availability of data geocoded to micro geographic units and advanced modeling techniques. The utility of such fine-grained data can be fully leveraged if linked to local governance units that are accountable for implementation of programs and interventions. We used data from the 2011 Indian Census for village-level demographic and amenities features and the 2016 Indian Demographic and Health Survey in a bias-corrected semisupervised regression framework to predict child anthropometric failures for all villages in India. Of the total geographic variation in predicted child anthropometric failure estimates, 54.2 to 72.3\% were attributed to the village level followed by 20.6 to 39.5\% to the state level. The mean predicted stunting was 37.9\% (SD: 10.1\%; IQR: 31.2 to 44.7\%), and substantial variation was found across villages ranging from less than 5\% for 691 villages to over 70\% in 453 villages. Estimates at the village level can potentially shift the paradigm of policy discussion in India by enabling more informed prioritization and precise targeting. The proposed methodology can be adapted and applied to diverse population health indicators, and in other contexts, to reveal spatial heterogeneity at a finer geographic scale and identify local areas with the greatest needs and with direct implications for actions to take place.}, url = {https://doi.org/10.1073/pnas.2025865118}, author = {Rockli Kim and Avleen S. Bijral and Yun Xu and Xiuyuan Zhang and Jeffrey C. Blossom and Akshay Swaminathan and Gary King and Alok Kumar and Rakesh Sarwal and Juan M. Lavista Ferres and Subramanian, S.V.} } @article {650525, title = {Survey Data and Human Computation for Improved Flu Tracking}, journal = {Nature Communications}, volume = {12}, number = {194}, year = {2021}, pages = {1-8}, abstract = {While digital trace data from sources like search engines hold enormous potential for tracking and understanding human behavior, these streams of data lack information about the actual experiences of those individuals generating the data. Moreover, most current methods ignore or under-utilize human processing capabilities that allow humans to solve problems not yet solvable by computers (human computation). We demonstrate how behavioral research, linking digital and real-world behavior, along with human computation, can be utilized to improve the performance of studies using digital data streams. This study looks at the use of search data to track prevalence of Influenza-Like Illness (ILI). We build a behavioral model of flu search based on survey data linked to users{\textquoteright} online browsing data. We then utilize human computation for classifying search strings. Leveraging these resources, we construct a tracking model of ILI prevalence that outperforms strong historical benchmarks using only a limited stream of search data and lends itself to tracking ILI in smaller geographic units. 
While this paper only addresses searches related to ILI, the method we describe has potential for tracking a broad set of phenomena in near real-time.}, url = {https://doi.org/10.1038/s41467-020-20206-z}, author = {Stefan Wojcik and Avleen Bijral and Richard Johnston and Juan Miguel Lavista and Gary King and Ryan Kennedy and Alessandro Vespignani and Lazer, David} } @article {608730, title = {A Theory of Statistical Inference for Ensuring the Robustness of Scientific Results}, journal = {Management Science}, year = {2021}, pages = {1-24}, abstract = {Inference is the process of using facts we know to learn about facts we do not know. A theory of inference gives assumptions necessary to get from the former to the latter, along with a definition for and summary of the resulting uncertainty. Any one theory of inference is neither right nor wrong, but merely an axiom that may or may not be useful. Each of the many diverse theories of inference can be valuable for certain applications. However, no existing theory of inference addresses the tendency to choose, from the range of plausible data analysis specifications consistent with prior evidence, those that inadvertently favor one{\textquoteright}s own hypotheses. Since the biases from these choices are a growing concern across scientific fields, and in a sense the reason the scientific community was invented in the first place, we introduce a new theory of inference designed to address this critical problem. We derive "hacking intervals," which are the range of a summary statistic one may obtain given a class of possible endogenous manipulations of the data. Hacking intervals require no appeal to hypothetical data sets drawn from imaginary superpopulations. A scientific result with a small hacking interval is more robust to researcher manipulation than one with a larger interval, and is often easier to interpret than a classical confidence interval. Some versions of hacking intervals turn out to be equivalent to classical confidence intervals, which means they may also provide a more intuitive and potentially more useful interpretation of classical confidence intervals.\ }, url = {https://doi.org/10.1287/mnsc.2020.3818}, author = {Beau Coker and Rudin, Cynthia and Gary King} } @article {662432, title = {Computational social science: Obstacles and opportunities}, journal = {Science}, volume = {369}, number = {6507}, year = {2020}, pages = {1060-1062}, abstract = {The field of computational social science (CSS) has exploded in prominence over the past decade, with thousands of papers published using observational data, experimental designs, and large-scale simulations that were once unfeasible or unavailable to researchers. These studies have greatly improved our understanding of important phenomena, ranging from social inequality to the spread of infectious diseases. The institutions supporting CSS in the academy have also grown substantially, as evidenced by the proliferation of conferences, workshops, and summer schools across the globe, across disciplines, and across sources of data. But the field has also fallen short in important ways. Many institutional structures around the field{\textemdash}including research ethics, pedagogy, and data infrastructure{\textemdash}are still nascent. 
We suggest opportunities to address these issues, especially in improving the alignment between the organization of the 20th-century university and the intellectual requirements of the field.}, url = {https://science.sciencemag.org/content/369/6507/1060}, author = {David M. J. Lazer and Pentland, Alex and Duncan J. Watts and Aral, Sinan and Susan Athey and Contractor, Noshir and Deen Freelon and Sandra Gonzalez-Bailon and Gary King and Helen Margetts and Alondra Nelson and Matthew J. Salganik and Markus Strohmaier and Alessandro Vespignani and Claudia Wagner} } @article {662888, title = {Population-scale Longitudinal Mapping of COVID-19 Symptoms, Behaviour and Testing}, journal = {Nature Human Behavior}, year = {2020}, abstract = {Despite the widespread implementation of public health measures, coronavirus disease 2019 (COVID-19) continues to spread in the United States. To facilitate an agile response to the pandemic, we developed How We Feel, a web and mobile application that collects longitudinal self-reported survey responses on health, behaviour and demographics. Here, we report results from over 500,000 users in the United States from 2 April 2020 to 12 May 2020. We show that self-reported surveys can be used to build predictive models to identify likely COVID-19-positive individuals. We find evidence among our users for asymptomatic or presymptomatic presentation; show a variety of exposure, occupational and demographic risk factors for COVID-19 beyond symptoms; reveal factors for which users have been SARS-CoV-2 PCR tested; and highlight the temporal dynamics of symptoms and self-isolation behaviour. These results highlight the utility of collecting a diverse set of symptomatic, demographic, exposure and behavioural self-reported data to fight the COVID-19 pandemic.}, url = {https://rdcu.be/b7dA6}, author = {William E. Allen and Altae-Tran, Han and Briggs, James and Xin Jin and McGee, Glen and Shi, Andy and Raghavan, Rumya and Mireille Kamariza and Nova, Nicole and Pereta, Albert and Danford, Chris and Kamel, Amine and Gothe, Patrik and Milam, Evrhet and Aurambault, Jean and Primke, Thorben and Li, Weijie and Inkenbrandt, Josh and Huynh, Tuan and Chen, Evan and Lee, Christina and Croatto, Michael and Bentley, Helen and Lu, Wendy and Murray, Robert and Travassos, Mark and Brent A. Coull and Openshaw, John and Casey S. Greene and Shalem, Ophir and Gary King and Probasco, Ryan and Cheng, David R. and Silbermann, Ben and Zhang, Feng and Lin, Xihong} } @article {651149, title = {Building an International Consortium for Tracking Coronavirus Health Status}, journal = {Nature Medicine}, volume = {26}, year = {2020}, pages = {1161-1165}, abstract = {Information is the most potent protective weapon we have to combat a pandemic, at both the individual and global level. For individuals, information can help us make personal decisions and provide a sense of security. For the global community, information can inform policy decisions and offer critical insights into the epidemic of COVID-19 disease. Fully leveraging the power of information, however, requires large amounts of data and access to it. To achieve this, we are making steps to form an international consortium, Coronavirus Census Collective (CCC, coronaviruscensuscollective.org), that will serve as a hub for integrating information from multiple data sources that can be utilized to understand, monitor, predict, and combat global pandemics. 
These sources may include self-reported health status through surveys (including mobile apps), results of diagnostic laboratory tests, and other static and real-time geospatial data. This collective effort to track and share information will be invaluable in predicting hotspots of disease outbreak, identifying which factors control the rate of spreading, informing immediate policy decisions, evaluating the effectiveness of measures taken by health organizations on pandemic control, and providing critical insight on the etiology of COVID-19. It will also help individuals stay informed on this rapidly evolving situation and contribute to other global efforts to slow the spread of disease. In the past few weeks, several initiatives across the globe have surfaced to use daily self-reported symptoms as a means to track disease spread, predict outbreak locations, guide population measures and help in the allocation of healthcare resources. The aim of this paper is to put out a call to standardize these efforts and spark a collaborative effort to maximize the global gain while protecting participant privacy.}, url = {https://www.nature.com/articles/s41591-020-0929-x}, author = {Segal, Eran and Zhang, Feng and Lin, Xihong and Gary King and Shalem, Ophir and Smadar Shilo and William E. Allen and Yonatan H. Grad and Casey S. Greene and Faisal Alquaddoomi and Anders, Simon and Ran Balicer and Tal Bauman and Ximena Bonilla and Gisel Booman and Andrew T. Chan and Ori Cohen and Silvano Coletti and Natalie Davidson and Yuval Dor and David A. Drew and Elemento, Olivier and Georgina Evans and Phil Ewels and Joshua Gale and Amir Gavrieli and Geiger, Benjamin and Iman Hajirasouliha and Roman Jerala and Andre Kahles and Olli Kallioniemi and Ayya Keshet and Gregory Landua and Tomer Meir and Aline Muller and Long H. Nguyen and Oresic, Matej and Svetlana Ovchinnikova and Hedi Peterson and Jay Rajagopal and Gunnar R{\"a}tsch and Hagai Rossman and Johan Rung and Sboner, Andrea and Alexandros Sigaras and Spector, Tim and Ron Steinherz and Irene Stevens and Vilo, Jaak and Paul Wilmes and CCC (Coronavirus Census Collective)} } @article {366526, title = {Do Nonpartisan Programmatic Policies Have Partisan Electoral Effects? Evidence from Two Large Scale Experiments}, journal = {Journal of Politics}, volume = {81}, number = {2}, year = {2020}, month = {2020}, pages = {714-730}, abstract = {A vast literature demonstrates that voters around the world who benefit from their governments{\textquoteright} discretionary spending cast more ballots for the incumbent party than those who do not benefit. But contrary to most theories of political accountability, some suggest that voters also reward incumbent parties for implementing "programmatic"\ spending legislation, over which incumbents have no discretion, and even when passed with support from all major parties. Why voters would attribute responsibility when none exists is unclear, as is why minority party legislators would approve of legislation that would cost them votes. We study the electoral effects of two large prominent programmatic policies that fit the ideal type especially well, with unusually large scale experiments that bring more evidence to bear on this question than has previously been possible. For the first policy, we design and implement ourselves one of the largest randomized social experiments ever. 
For the second policy, we reanalyze studies that used a large scale randomized experiment and a natural experiment to study the same question but came to opposite conclusions. Using corrected data and improved statistical methods, we show that the evidence from all analyses of both policies is consistent: programmatic policies have no effect on voter support for incumbents. We conclude by discussing how the many other studies in the literature may be interpreted in light of our results.}, url = {http://www.journals.uchicago.edu/doi/10.1086/707059}, author = {Kosuke Imai and Gary King and Carlos Velasco Rivera} } @article {638482, title = {The {\textquotedblleft}Math Prefresher{\textquotedblright} and The Collective Future of Political Science Graduate Training}, journal = {PS: Political Science and Politics}, volume = {53}, number = {3}, year = {2020}, pages = {537-541}, abstract = { The political science math prefresher arose a quarter century ago and has now spread to many of our discipline{\textquoteright}s Ph.D. programs. Incoming students arrive for graduate school a few weeks early for ungraded instruction in math, statistics, and computer science as they are useful for political science. The prefresher{\textquoteright}s benefits, however, go beyond the technical material taught: it develops lasting camaraderie with their entering class, facilitates connections with senior graduate students, opens pathways to mastering methods necessary for research, and eases the transition to the increasingly collaborative nature of graduate work. The prefresher also shows how faculty across a highly diverse discipline can work together to train the next generation. We review this program, highlight its collaborative aspects, and try to take the idea to the next level by building infrastructure to share teaching materials across universities so separate programs can build on each other{\textquoteright}s work and improve all our programs. }, url = {https://doi.org/10.1017/S1049096519002245}, author = {Gary King and Kuriwaki, Shiro and Yon Soo Park} } @article {626161, title = {Theoretical Foundations and Empirical Evaluations of Partisan Fairness in District-Based Democracies}, journal = {American Political Science Review}, volume = {114}, number = {1}, year = {2020}, pages = {164-178}, abstract = {We clarify the theoretical foundations of partisan fairness standards for district-based democratic electoral systems, including essential assumptions and definitions that have not been recognized, formalized, or in some cases even discussed. We also offer extensive empirical evidence for assumptions with observable implications. Throughout, we follow a fundamental principle of statistical inference too often ignored in this literature -- defining the quantity of interest separately so its measures can be proven wrong, evaluated, or improved. This enables us to prove which of the many newly proposed fairness measures are statistically appropriate and which are biased, limited, or not measures of the theoretical quantity they seek to estimate at all. Because real world redistricting and gerrymandering involves complicated politics with numerous participants and conflicting goals, measures biased for partisan fairness sometimes still provide useful descriptions of other aspects of electoral systems.}, url = { https://doi.org/10.1017/S000305541900056X}, author = {Jonathan N. 
Katz and Gary King and Elizabeth Rosenblatt} } @article {608283, title = {Ecological Regression with Partial Identification}, journal = {Political Analysis}, volume = {28}, number = {1}, year = {2019}, month = {2019}, pages = {1--22}, abstract = { Ecological inference (EI) is the process of learning about individual behavior from aggregate data. We relax assumptions by allowing for {\textquoteleft}{\textquoteleft}linear contextual effects,{\textquoteright}{\textquoteright} which previous works have regarded as plausible but avoided due to non-identification, a problem we sidestep by deriving bounds instead of point estimates. In this way, we offer a conceptual framework to improve on the Duncan-Davis bound, derived more than sixty-five years ago. To study the effectiveness of our approach, we collect and analyze 8,430\  2x2\ EI datasets with known ground truth from several sources --- thus bringing considerably more data to bear on the problem than the existing dozen or so datasets available in the literature for evaluating EI estimators. For the 88\% of real data sets in our collection that fit a proposed rule, our approach reduces the width of the Duncan-Davis bound, on average, by about 44\%, while still capturing the true district level parameter about 99\% of the time. The remaining 12\% revert to the Duncan-Davis bound.\  Easy-to-use software is available that implements all the methods described in the paper.\  }, author = {Wenxin Jiang and Gary King and Allen Schmaltz and Martin A. Tanner} } @article {607533, title = {A New Model for Industry-Academic Partnerships}, journal = {PS: Political Science and Politics}, volume = {53}, number = {4}, year = {2019}, month = {2019}, pages = {703-709}, abstract = { The mission of the social sciences is to understand and ameliorate society{\textquoteright}s greatest challenges. The data held by private companies, collected for different purposes, hold vast potential to further this mission. Yet, because of consumer privacy, trade secrets, proprietary content, and political sensitivities, these datasets are often inaccessible to scholars. We propose a novel organizational model to address these problems. We also report on the first partnership under this model, to study the incendiary issues surrounding the impact of social media on elections and democracy: Facebook provides (privacy-preserving) data access; eight ideologically and substantively diverse charitable foundations provide funding; an organization of academics we created, Social Science One (see SocialScience.One), leads the project; and the Institute for Quantitative Social Science at Harvard and the Social Science Research Council provide logistical help. }, url = {https://www.cambridge.org/core/journals/ps-political-science-and-politics/article/new-model-for-industryacademic-partnerships/AD7D0B8EA582DC017D9A24754D833CAA}, author = {Gary King and Nathaniel Persily} } @article {33638, title = {A Theory of Statistical Inference for Matching Methods in Causal Research}, journal = {Political Analysis}, volume = {27}, number = {1}, year = {2019}, month = {2018}, pages = {46-68}, abstract = {Researchers who generate data often optimize efficiency and robustness by choosing stratified over simple random sampling designs. Yet, all theories of inference proposed to justify matching methods are based on simple random sampling. 
This is all the more troubling because, although these theories require exact matching, most matching applications resort to some form of ex post stratification (on a propensity score, distance metric, or the covariates) to find approximate matches, thus nullifying the statistical properties these theories are designed to ensure. Fortunately, the type of sampling used in a theory of inference is an axiom, rather than an assumption vulnerable to being proven wrong, and so we can replace simple with stratified sampling, so long as we can show, as we do here, that the implications of the theory are coherent and remain true. Properties of estimators based on this theory are much easier to understand and can be satisfied without the unattractive properties of existing theories, such as assumptions hidden in data analyses rather than stated up front, asymptotics, unfamiliar estimators, and complex variance calculations. Our theory of inference makes it possible for researchers to treat matching as a simple form of preprocessing to reduce model dependence, after which all the familiar inferential techniques and uncertainty calculations can be applied. This theory also allows binary, multicategory, and continuous treatment variables from the outset and straightforward extensions for imperfect treatment assignment and different versions of treatments.}, author = {Stefano M. Iacus and Gary King and Giuseppe Porro} } @article {226731, title = {Why Propensity Scores Should Not Be Used for Matching}, journal = {Political Analysis}, volume = {27}, number = {4}, year = {2019}, month = {2019}, pages = {435-454}, abstract = { We show that propensity score matching (PSM), an enormously popular method of preprocessing data for causal inference, often accomplishes the opposite of its intended goal --- thus increasing imbalance, inefficiency, model dependence, and bias. The weakness of PSM comes from its attempts to approximate a completely randomized experiment, rather than, as with other matching methods, a more efficient fully blocked randomized experiment. PSM is thus uniquely blind to the often large portion of imbalance that can be eliminated by approximating full blocking with other matching methods. Moreover, in data balanced enough to approximate complete randomization, either to begin with or after pruning some observations, PSM approximates random matching which, we show, increases imbalance even relative to the original data. Although these results suggest researchers replace PSM with one of the other available matching methods, propensity scores have other productive uses. }, url = {https://doi.org/10.1017/pan.2019.11}, author = {Gary King and Richard Nielsen} } @article {606104, title = {Use of a Social Annotation Platform for Pre-Class Reading Assignments in a Flipped Introductory Physics Class}, journal = {Frontiers in Education}, volume = {3}, number = {8}, year = {2018}, pages = {1-12}, abstract = {In this paper, we illustrate the successful implementation of pre-class reading assignments through a social learning platform that allows students to discuss the reading online with their classmates. We show how the platform can be used to understand how students are reading before class. We find that, with this platform, students spend an above average amount of time reading (compared to that reported in the literature) and that most students complete their reading assignments before class. We identify specific reading behaviors that are predictive of in-class exam performance. 
We also demonstrate ways that the platform promotes active reading strategies and produces high-quality learning interactions between students outside class. Finally, we compare the exam performance of two cohorts of students, where the only difference between them is the use of the platform; we show that students do significantly better on exams when using the platform. Reprinted in Cassidy, R., Charles, E. S., Slotta, J. D., Lasry, N., eds. (2019). Active Learning: Theoretical Perspectives, Empirical Studies and Design Profiles. Lausanne: Frontiers Media. doi: 10.3389/978-2-88945-885-1}, url = {https://www.frontiersin.org/articles/10.3389/feduc.2018.00008/full?\&utm_source=Email_to_authors_\&utm_medium=Email\&utm_content=T1_11.5e1_author\&utm_campaign=Email_publication\&field=\&journalName=Frontiers_in_Education\&id=315665}, author = {Miller, Kelly and Lukoff, Brian and Gary King and Eric Mazur} } @article {584466, title = {Edited transcript of a talk on Partisan Symmetry at the {\textquoteright}Redistricting and Representation Forum{\textquoteright}}, journal = {Bulletin of the American Academy of Arts and Sciences}, volume = {Winter}, year = {2018}, month = {2018}, pages = {55-58}, abstract = {The origin, meaning, estimation, and application of the concept of partisan symmetry\ in legislative redistricting, and the justiciability of partisan gerrymandering. An edited transcript of a talk at the {\textquotedblleft}Redistricting and Representation Forum,{\textquotedblright} American Academy of Arts \& Sciences, Cambridge, MA 11/8/2017. Here also is a video of the original talk.}, author = {Gary King} } @article {581966, title = {How the news media activate public expression and influence national agendas}, journal = {Science}, volume = {358}, year = {2017}, pages = {776-780}, abstract = { We demonstrate that exposure to the news media causes Americans to take public stands on specific issues, join national policy conversations, and express themselves publicly{\textemdash}all key components of democratic politics{\textemdash}more often than they would otherwise. After recruiting 48 mostly small media outlets, we chose groups of these outlets to write and publish articles on subjects we approved, on dates we randomly assigned. We estimated the causal effect on proximal measures, such as website pageviews and Twitter discussion of the articles{\textquoteright} specific subjects, and distal ones, such as national Twitter conversation in broad policy areas. Our intervention increased discussion in each broad policy area by approximately 62.7\% (relative to a day{\textquoteright}s volume), accounting for 13,166 additional posts over the treatment week, with similar effects across population subgroups.\  On the\ Science website:\ Abstract,\ Reprint,\ Full text, and a comment (by\ Matthew Gentzkow)\ "Small media, big impact". \  \  }, url = {http://science.sciencemag.org/cgi/rapidpdf/358/6364/776?ijkey=yJZhbgQUH1Gi.\&keytype=ref\&siteid=sci}, author = {Gary King and Benjamin Schneer and White, Ariel} } @article {401886, title = {The Balance-Sample Size Frontier in Matching Methods for Causal Inference}, journal = {American Journal of Political Science}, volume = {61}, number = {2}, year = {2017}, month = {2016}, pages = {473-489}, abstract = {We propose a simplified approach to matching for causal inference that simultaneously optimizes balance (similarity between the treated and control groups) and matched sample size.
Existing approaches either fix the matched sample size and maximize balance or fix balance and maximize sample size, leaving analysts to settle for suboptimal solutions or attempt manual optimization by iteratively tweaking their matching method and rechecking balance. To jointly maximize balance and sample size, we introduce the matching frontier, the set of matching solutions with maximum possible balance for each sample size. Rather than iterating, researchers can choose matching solutions from the frontier for analysis in one step. We derive fast algorithms that calculate the matching frontier for several commonly used balance metrics. We demonstrate with analyses of the effect of sex on judging and job training programs that show how the methods we introduce can extract new knowledge from existing data sets.Easy to use, open source, software is available here\ to implement all methods in the paper.}, author = {Gary King and Christopher Lucas and Richard Nielsen} } @article {457551, title = {booc.io: An Education System with Hierarchical Concept Maps}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {23}, number = {1}, year = {2017}, pages = {571-580}, abstract = { Information hierarchies are difficult to express when real-world space or time constraints force traversing the hierarchy in linear presentations, such as in educational books and classroom courses. We present booc.io, which allows linear and non-linear presentation and navigation of educational concepts and material. To support a breadth of material for each concept, booc.io is Web based, which allows adding material such as lecture slides, book chapters, videos, and LTIs. A visual interface assists the creation of the needed hierarchical structures. The goals of our system were formed in expert interviews, and we explain how our design meets these goals. We adapt a real-world course into booc.io, and perform introductory qualitative evaluation with students. }, url = {https://ieeexplore.ieee.org/document/7536150}, author = {Michail Schwab and Hendrik Strobelt and James Tompkin and Colin Fredericks and Huff, Connor and Dana Higgins and Anton Strezhnev and Mayya Komisarchik and Gary King and Pfister, Hanspeter} } @article {181441, title = {Computer-Assisted Keyword and Document Set Discovery from Unstructured Text}, journal = {American Journal of Political Science}, volume = {61}, number = {4}, year = {2017}, month = {2017}, pages = {971-988}, abstract = {The (unheralded) first step in many applications of automated text analysis involves selecting keywords to choose documents from a large text corpus for further study. Although all substantive results depend on this choice, researchers usually pick keywords in ad hoc ways that are far from optimal and usually biased. Paradoxically, this often means that the validity of the most sophisticated text analysis methods depend in practice on the inadequate keyword counting or matching methods they are designed to replace. Improved methods of keyword selection would also be valuable in many other areas, such as following conversations that rapidly innovate language to evade authorities, seek political advantage, or express creativity; generic web searching; eDiscovery; look-alike modeling; intelligence analysis; and sentiment and topic analysis. We develop a computer-assisted (as opposed to fully automated) statistical approach that suggests keywords from available text without needing structured data as inputs. 
This framing poses the statistical problem in a new way, which leads to a widely applicable algorithm. Our specific approach is based on training classifiers, extracting information from (rather than correcting) their mistakes, and summarizing results with Boolean search strings. We illustrate how the technique works with analyses of English texts about the Boston Marathon Bombings, Chinese social media posts designed to evade censorship, among others.}, url = {https://doi.org/10.1111/ajps.12291}, author = {Gary King and Patrick Lam and Margaret Roberts} } @article {399886, title = {How the Chinese Government Fabricates Social Media Posts for Strategic Distraction, not Engaged Argument}, journal = {American Political Science Review}, volume = {111}, number = {3}, year = {2017}, month = {2017}, pages = {484-501}, abstract = { The Chinese government has long been suspected of hiring as many as 2,000,000 people to surreptitiously insert huge numbers of pseudonymous and other deceptive writings into the stream of real social media posts, as if they were the genuine opinions of ordinary people. Many academics, and most journalists and activists, claim that these so-called {\textquoteleft}{\textquoteleft}50c party{\textquoteright}{\textquoteright} posts vociferously argue for the government{\textquoteright}s side in political and policy debates. As we show, this is also true of the vast majority of posts openly accused on social media of being 50c. Yet, almost no systematic empirical evidence exists for this claim, or, more importantly, for the Chinese regime{\textquoteright}s strategic objective in pursuing this activity. In the first large scale empirical analysis of this operation, we show how to identify the secretive authors of these posts, the posts written by them, and their content. We estimate that the government fabricates and posts about 448 million social media comments a year. In contrast to prior claims, we show that the Chinese regime{\textquoteright}s strategy is to avoid arguing with skeptics of the party and the government, and to not even discuss controversial issues. We show that the goal of this massive secretive operation is instead to distract the public and change the subject, as most of these posts involve cheerleading for China, the revolutionary history of the Communist Party, or other symbols of the regime. We discuss how these results fit with what is known about the Chinese censorship program, and suggest how they may change our broader theoretical understanding of {\textquoteleft}{\textquoteleft}common knowledge{\textquoteright}{\textquoteright} and information control in authoritarian regimes. This paper is related to our articles in Science,\ {\textquotedblleft}Reverse-Engineering Censorship In China: Randomized Experimentation And Participant Observation{\textquotedblright}, and\ the\ American Political Science Review,\ {\textquotedblleft}How Censorship In China Allows Government Criticism But Silences Collective Expression{\textquotedblright}. }, url = {doi:10.1017/S0003055417000144}, author = {Gary King and Jennifer Pan and Margaret E. Roberts} } @article {161326, title = {A Unified Approach to Measurement Error and Missing Data: Details and Extensions}, journal = {Sociological Methods and Research}, volume = {46}, number = {3}, year = {2017}, note = { This is the second of two articles to appear in the same issue of the same journal by the same authors.
\ The other one is\ {\textquotedblleft}A Unified Approach to Measurement Error and Missing Data: Overview{\textquotedblright}. }, pages = {342-369}, abstract = { We extend a unified and easy-to-use approach to measurement error and missing data. In our companion article, Blackwell, Honaker, and King give an intuitive overview of the new technique, along with practical suggestions and empirical applications. Here, we offer more precise technical details, more sophisticated measurement error model specifications and estimation procedures, and analyses to assess the approach{\textquoteright}s robustness to correlated measurement errors and to errors in categorical variables. These results support using the technique to reduce bias and increase efficiency in a wide variety of empirical research. }, url = {http://journals.sagepub.com/doi/full/10.1177/0049124115589052}, author = {Matthew Blackwell and James Honaker and Gary King} } @article {6388, title = {A Unified Approach to Measurement Error and Missing Data: Overview and Applications}, journal = {Sociological Methods and Research}, volume = {46}, number = {3}, year = {2017}, note = { This is the first of two articles to appear in the same issue of the same journal by the same authors. \ The second is\ {\textquotedblleft}A Unified Approach to Measurement Error and Missing Data: Details and Extensions{\textquotedblright}. }, pages = {303-341}, abstract = { Although social scientists devote considerable effort to mitigating measurement error during data collection, they often ignore the issue during data analysis. And although many statistical methods have been proposed for reducing measurement error-induced biases, few have been widely used because of implausible assumptions, high levels of model dependence, difficult computation, or inapplicability with multiple mismeasured variables. We develop an easy-to-use alternative without these problems; it generalizes the popular multiple imputation (MI) framework by treating missing data problems as a limiting special case of extreme measurement error, and corrects for both. Like MI, the proposed framework is a simple two-step procedure, so that in the second step researchers can use whatever statistical method they would have if there had been no problem in the first place. We also offer empirical illustrations, open source software that implements all the methods described herein, and a companion paper with technical details and extensions (Blackwell, Honaker, and King, 2017b). }, url = {http://journals.sagepub.com/doi/full/10.1177/0049124115585360}, author = {Matthew Blackwell and James Honaker and Gary King} } @article {377091, title = {Comment on {\textquoteright}Estimating the Reproducibility of Psychological Science{\textquoteright}}, journal = {Science}, volume = {351}, number = {6277}, year = {2016}, pages = {1037a-1038a}, abstract = {A\ recent article\ by the Open Science Collaboration (a group of 270 coauthors) gained considerable academic and public attention due to its sensational conclusion that the replicability of psychological science is surprisingly low.\ Science\ magazine lauded this article as one of the top 10 scientific breakthroughs of the year across all fields of science, reports of which appeared on the front pages of newspapers worldwide.\ We show that OSC{\textquoteright}s article contains three major statistical errors and, when corrected, provides no evidence of a replication crisis. 
Indeed, the evidence is consistent with the opposite conclusion -- that the reproducibility of psychological science is quite high and, in fact, statistically indistinguishable from 100\%. (Of course, that doesn{\textquoteright}t mean that the replicability is 100\%, only that the evidence is insufficient to reliably estimate replicability.)\ The moral of the story is that meta-science must follow the rules of science. Replication data is available in this dataverse archive. See also the full\ web site\ for this article and related materials, and one of the news articles\ written about it.}, url = {http://science.sciencemag.org/content/351/6277/1037.2}, author = {Gilbert, Daniel and Gary King and Stephen Pettigrew and Timothy Wilson} } @article {457561, title = {Effectiveness of the WHO Safe Childbirth Checklist Program in Reducing Severe Maternal, Fetal, and Newborn Harm: Study Protocol for a Matched-Pair, Cluster Randomized Controlled Trial in Uttar Pradesh, India}, journal = {Trials}, volume = {576}, number = {17}, year = {2016}, pages = {1-10}, abstract = { Background: Effective, scalable strategies to improve maternal, fetal, and newborn health and reduce preventable morbidity and mortality are urgently needed in low- and middle-income countries. Building on the successes of previous checklist-based programs, the World Health Organization (WHO) and partners led the development of the Safe Childbirth Checklist (SCC), a 28-item list of evidence-based practices linked with improved maternal and newborn outcomes. Pilot-testing of the Checklist in Southern India demonstrated dramatic improvements in adherence by health workers to essential childbirth-related practices (EBPs). The BetterBirth Trial seeks to measure the effectiveness of SCC impact on EBPs, deaths, and complications at a larger scale. Methods: This matched-pair, cluster-randomized controlled, adaptive trial will be conducted in 120 facilities across 24 districts in Uttar Pradesh, India. Study sites, identified according to predefined eligibility criteria, were matched by measured covariates before randomization. The intervention, the SCC embedded in a quality improvement program, consists of leadership engagement, a 2-day educational launch of the SCC, and support through placement of a trained peer {\textquotedblleft}coach{\textquotedblright} to provide supportive supervision and real-time data feedback over an 8-month period with decreasing intensity. A facility-based childbirth quality coordinator is trained and supported to drive sustained behavior change after the BetterBirth team leaves the facility. Study participants are birth attendants and women and their newborns who present to the study facilities for childbirth at 60 intervention and 60 control sites. The primary outcome is a composite measure including maternal death, maternal severe morbidity, stillbirth, and newborn death, occurring within 7 days after birth. The sample size (n = 171,964) was calculated to detect a 15\% reduction in the primary outcome. 
Adherence by health workers to EBPs will be measured in a subset of births (n = 6000).\ The trial will be conducted in close collaboration with key partners including the Governments of India and Uttar Pradesh, the World Health Organization, an expert Scientific Advisory Committee, an experienced local implementing organization (Population Services International, PSI), and frontline facility leaders and workers. Discussion: If effective, the WHO Safe Childbirth Checklist program could be a powerful health facility-strengthening intervention to improve quality of care and reduce preventable harm to women and newborns, with millions of potential beneficiaries. Trial registration: BetterBirth Study Protocol dated: 13 February 2014; ClinicalTrials.gov: NCT02148952; Universal Trial Number: U1111-1131-5647.\  }, url = {http://rdcu.be/AGQs}, author = {Semrau, Katherine and Lisa R. Hirschhorn and Bhala Kodkany and Jonathan Spector and Danielle E. Tuller and Gary King and Stuart Lipsitz and Narender Sharma and Vinay P. Singh and Bharath Kumar and Neelam Dhingra-Kumar and Rebecca Firestone and Vishwajeet Kumar and Gawande, Atul} } @article {412531, title = {Scoring Social Security Proposals: Response from Kashin, King, and Soneji}, journal = {Journal of Economic Perspectives}, volume = {30}, number = {2}, year = {2016}, month = {Spring 2016}, pages = {245-248}, abstract = { This is a response to Peter Diamond{\textquoteright}s\ comment on a two paragraph passage in our article,\ Konstantin Kashin, Gary King, and Samir Soneji. 2015. {\textquotedblleft}Systematic Bias and Nontransparency in US Social Security Administration Forecasts.{\textquotedblright} Journal of Economic Perspectives, 2, 29: 239-258.\  }, url = {http://www.ingentaconnect.com/contentone/aea/jep/2016/00000030/00000002/art00012}, author = {Konstantin Kashin and Gary King and Samir Soneji} } @article {509026, title = {Urban observatories: City data can inform decision theory}, journal = {Nature}, volume = {519}, year = {2015}, pages = {291}, abstract = {Data are being collected on human behaviour in cities such as London, New York, Singapore and Shanghai, with a view to meeting city dwellers{\textquoteright} needs more effectively. Incorporating decision-making theory into analyses of the data from these {\textquoteright}urban observatories{\textquoteright} would yield further valuable information.}, url = {http://www.nature.com/nature/journal/v519/n7543/full/519291b.html}, author = {Aristides A. N. Patrinos and Hannah Bayer and Paul W. Glimcher and Steven Koonin and Miyoung Chun and Gary King} } @article {213571, title = {Automating Open Science for Big Data}, journal = {ANNALS of the American Academy of Political and Social Science}, volume = {659}, number = {1}, year = {2015}, pages = {260-273}, abstract = {The vast majority of social science research presently uses small (MB or GB scale) data sets.
These fixed-scale data sets are commonly downloaded to the researcher{\textquoteright}s computer where the analysis is performed locally, and are often shared and cited with well-established technologies, such as the Dataverse Project (see Dataverse.org), to support the published results.\  The trend towards Big Data -- including large scale streaming data -- is starting to transform research and has the potential to impact policy-making and our understanding of the social, economic, and political problems that affect human societies.\  However, this research poses new challenges in execution, accountability, preservation, reuse, and reproducibility. Downloading these data sets to a researcher{\textquoteright}s computer is often infeasible; hence, analyses take place in the cloud, require unusual expertise, and benefit from collaborative teamwork and novel tool development. The informativeness that makes these data sets valuable also means that they are much more likely to contain highly sensitive personally identifiable information. In this paper, we discuss solutions to these new challenges so that the social sciences can realize the potential of Big Data.}, url = {http://ann.sagepub.com/content/659/1/260.full.pdf+html}, author = {Merce Crosas and Gary King and James Honaker and Latanya Sweeney} } @article {240566, title = {Explaining Systematic Bias and Nontransparency in US Social Security Administration Forecasts}, journal = {Political Analysis}, volume = {23}, number = {3}, year = {2015}, pages = {336-362}, abstract = {The accuracy of U.S. Social Security Administration (SSA) demographic and financial forecasts is crucial for the solvency of its Trust Funds, other government programs, industry decision making, and the evidence base of many scholarly articles. Because SSA makes public little replication information and uses qualitative and antiquated statistical forecasting methods, fully independent alternative forecasts (and the ability to score policy proposals to change the system) are nonexistent. Yet, no systematic evaluation of SSA forecasts has ever been published by SSA or anyone else --- until a companion paper to this one (King, Kashin, and Soneji, 2015a). We show that SSA{\textquoteright}s forecasting errors were approximately unbiased until about 2000, but then began to grow quickly, with increasingly overconfident uncertainty intervals. Moreover, the errors are all in the same potentially dangerous direction, making the Social Security Trust Funds look healthier than they actually are. We extend and then attempt to explain these findings with evidence from a large number of interviews we conducted with participants at every level of the forecasting and policy processes. We show that SSA{\textquoteright}s forecasting procedures meet all the conditions the modern social-psychology and statistical literatures demonstrate make bias likely. When those conditions mixed with potent new political forces trying to change Social Security, SSA{\textquoteright}s actuaries hunkered down trying hard to insulate their forecasts from strong political pressures. Unfortunately, this otherwise laudable resistance to undue influence, along with their ad hoc qualitative forecasting models, led the actuaries to miss important changes in the input data. Retirees began living longer lives and drawing benefits longer than predicted by simple extrapolations.
We also show that the solution to this problem involves SSA or Congress implementing in government two of the central projects of political science over the last quarter century: [1] promoting transparency in data and methods and [2] replacing with formal statistical models large numbers of qualitative decisions too complex for unaided humans to make optimally.}, url = {http://pan.oxfordjournals.org/lookup/doi/10.1093/pan/mpv011}, author = {Konstantin Kashin and Gary King and Samir Soneji} } @article {32225, title = {How Robust Standard Errors Expose Methodological Problems They Do Not Fix, and What to Do About It}, journal = {Political Analysis}, volume = {23}, number = {2}, year = {2015}, pages = {159{\textendash}179}, abstract = {"Robust standard errors" are used in a vast array of scholarship to correct standard errors for model misspecification. However, when misspecification is bad enough to make classical and robust standard errors diverge, assuming that it is nevertheless not so bad as to bias everything else requires considerable optimism. And even if the optimism is warranted, settling for a misspecified model, with or without robust standard errors, will still bias estimators of all but a few quantities of interest. The resulting cavernous gap between theory and practice suggests that considerable gains in applied statistics may be possible. We seek to help researchers realize these gains via a more productive way to understand and use robust standard errors; a new general and easier-to-use "generalized information matrix test" statistic that can formally assess misspecification (based on differences between robust and classical variance estimates); and practical illustrations via simulations and real examples from published research. How robust standard errors are used needs to change, but instead of jettisoning this popular tool we show how to use it to provide effective clues about model misspecification, likely biases, and a guide to considerably more reliable, and defensible, inferences. Accompanying this article is open source software that implements the methods we describe.\ }, url = {http://pan.oxfordjournals.org/content/23/2/159}, author = {Gary King and Margaret E. Roberts} } @article {251041, title = {Systematic Bias and Nontransparency in US Social Security Administration Forecasts}, journal = {Journal of Economic Perspectives}, volume = {29}, number = {2}, year = {2015}, pages = {239-258}, abstract = {The financial stability of four of the five largest U.S. federal entitlement programs, strategic decision making in several industries, and many academic publications all depend on the accuracy of demographic and financial forecasts made by the Social Security Administration (SSA). Although the SSA has performed these forecasts since 1942, no systematic and comprehensive evaluation of their accuracy has ever been published by SSA or anyone else. The absence of a systematic evaluation of forecasts is a concern because the SSA relies on informal procedures that are potentially subject to inadvertent biases and does not share with the public, the scientific community, or other parts of SSA sufficient data or information necessary to replicate or improve its forecasts. These issues result in SSA holding a monopoly position in policy debates as the sole supplier of fully independent forecasts and evaluations of proposals to change Social Security. 
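To make concrete the comparison of robust and classical variance estimates described in the robust standard errors entry above, here is a self-contained base-R sketch on simulated data; it illustrates the general idea only, not the article's generalized information matrix test.

# Simulate a regression with heteroskedastic errors, then compare classical
# and heteroskedasticity-robust (HC0 sandwich) standard errors; a large gap
# between the two is the kind of clue to misspecification discussed above.
set.seed(2)
n <- 500
x <- rnorm(n)
y <- 1 + 2 * x + rnorm(n, sd = exp(x / 2))
fit <- lm(y ~ x)
X <- model.matrix(fit)
e <- residuals(fit)
bread <- solve(crossprod(X))
se_classical <- sqrt(diag(bread) * sum(e^2) / (n - ncol(X)))
se_robust <- sqrt(diag(bread %*% crossprod(X * e) %*% bread))
cbind(classical = se_classical, robust = se_robust)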
To assist with the forecasting evaluation problem, we collect all SSA forecasts for years that have passed and discover error patterns that could have been---and could now be---used to improve future forecasts. Specifically, we find that after 2000, SSA forecasting errors grew considerably larger and most of these errors made the Social Security Trust Funds look more financially secure than they actually were. In addition, SSA{\textquoteright}s reported uncertainty intervals are overconfident and increasingly so after 2000. We discuss the implications of these systematic forecasting biases for public policy.}, url = {https://www.aeaweb.org/articles.php?doi=10.1257/jep.29.2.239}, author = {Konstantin Kashin and Gary King and Samir Soneji} } @article {154846, title = {The Parable of Google Flu: Traps in Big Data Analysis}, journal = {Science}, volume = {343}, number = {14 March}, year = {2014}, pages = {1203-1205}, abstract = {Large errors in flu prediction were largely avoidable, which offers lessons for the use of big data. In February 2013, Google Flu Trends (GFT) made headlines but not for a reason that Google executives or the creators of the flu tracking system would have hoped. Nature reported that GFT was predicting more than double the proportion of doctor visits for influenza-like illness (ILI) than the Centers for Disease Control and Prevention (CDC), which bases its estimates on surveillance reports from laboratories across the United States (1, 2). This happened despite the fact that GFT was built to predict CDC reports. Given that GFT is often held up as an exemplary use of big data (3, 4), what lessons can we draw from this error? See also "Google Flu Trends Still Appears Sick: An Evaluation of the 2013-2014 Flu Season" \  }, url = {https://www.science.org/doi/10.1126/science.1248506}, author = {Lazer, David and Ryan Kennedy and Gary King and Alessandro Vespignani} } @article {47551, title = {Restructuring the Social Sciences: Reflections from Harvard{\textquoteright}s Institute for Quantitative Social Science}, journal = {PS: Political Science and Politics}, volume = {47}, number = {1}, year = {2014}, pages = {165-172}, abstract = {The social sciences are undergoing a dramatic transformation from studying problems to solving them; from making do with a small number of sparse data sets to analyzing increasing quantities of diverse, highly informative data; from isolated scholars toiling away on their own to larger scale, collaborative, interdisciplinary, lab-style research teams; and from a purely academic pursuit to having a major impact on the world. To facilitate these important developments, universities, funding agencies, and governments need to shore up and adapt the infrastructure that supports social science research. We discuss some of these developments here, as well as a new type of organization we created at Harvard to help encourage them -- the Institute for Quantitative Social Science. \ An increasing number of universities are beginning efforts to respond with similar institutions. This paper provides some suggestions for how individual universities might respond and how we might work together to advance social science more generally.
}, url = {http://journals.cambridge.org/repo_A9100Nlq}, author = {Gary King} } @article {101381, title = {Reverse-engineering censorship in China: Randomized experimentation and participant observation}, journal = {Science}, volume = {345}, number = {6199}, year = {2014}, note = {This work follows up on an article in the American Political Science Review on\ {\textquotedblleft}How Censorship In China Allows Government Criticism But Silences Collective Expression.{\textquotedblright}Science Magazine\ published a news story\ about this article, and their weekly\ Podcast\ led with an interview summarizing this work.}, pages = {1-10}, abstract = {Existing research on the extensive Chinese censorship organization uses observational\ methods with well-known limitations. We conducted the first large-scale experimental\ study of censorship by creating accounts on numerous social media sites, randomly\ submitting different texts, and observing from a worldwide network of computers which\ texts were censored and which were not. We also supplemented interviews with\ confidential sources by creating our own social media site, contracting with Chinese firms\ to install the same censoring technologies as existing sites, and{\textemdash}with their software,\ documentation, and even customer support{\textemdash}reverse-engineering how it all works. Our\ results offer rigorous support for the recent hypothesis that criticisms of the state, its\ leaders, and their policies are published, whereas posts about real-world events with\ collective action potential are censored.}, url = {http://www.sciencemag.org/content/345/6199/1251722.abstract}, author = {Gary King and Jennifer Pan and Margaret E. Roberts} } @article {690945, title = {Twitter: Big data opportunities{\textemdash}Response}, journal = {Science}, volume = {345}, number = {6193}, year = {2014}, pages = {148-149}, abstract = {WE THANK BRONIATOWSKI, Paul, and Dredze for giving us the opportunity to reemphasize the potential of big data and make the more obvious point that not all big data projects have the problems currently plaguing Google Flu Trends (GFT), nor are these problems inherent to the field in general.See our original papers: "The Parable of Google Flu: Traps in Big Data Analysis," and "Google Flu Trends Still Appears Sick: An Evaluation of the 2013-2014 Flu Season"}, url = {https://www.science.org/doi/10.1126/science.345.6193.148-b}, author = {Lazer, David and Ryan Kennedy and Gary King and Alessandro Vespignani} } @article {33531, title = {How Censorship in China Allows Government Criticism but Silences Collective Expression}, journal = {American Political Science Review}, volume = {107}, number = {2 (May)}, year = {2013}, note = {Please see our followup article published in Science, {\textquotedblleft}Reverse-Engineering Censorship In China: Randomized Experimentation And Participant Observation.{\textquotedblright}}, pages = {1-18}, abstract = {We offer the first large scale, multiple source analysis of the outcome of what may be the most extensive effort to selectively censor human expression ever implemented. To do this, we have devised a system to locate, download, and analyze the content of millions of social media posts originating from nearly 1,400 different social media services all over China before the Chinese government is able to find, evaluate, and censor (i.e., remove from the Internet) the large subset they deem objectionable. 
Using modern computer-assisted text analytic methods that we adapt to and validate in the Chinese language, we compare the substantive content of posts censored to those not censored over time in each of 85 topic areas. Contrary to previous understandings, posts with negative, even vitriolic, criticism of the state, its leaders, and its policies are not more likely to be censored. Instead, we show that the censorship program is aimed at curtailing collective action by silencing comments that represent, reinforce, or spur social mobilization, regardless of content. Censorship is oriented toward attempting to forestall collective activities that are occurring now or may occur in the future --- and, as such, seem to clearly expose government intent.}, author = {Gary King and Jennifer Pan and Margaret E. Roberts} } @article {33771, title = {How Social Science Research Can Improve Teaching}, journal = {PS: Political Science and Politics}, volume = {46}, number = {3}, year = {2013}, pages = {621-629}, abstract = {We marshal discoveries about human behavior and learning from social science research and show how they can be used to improve teaching and learning. The discoveries are easily stated as three social science generalizations: (1) social connections motivate, (2) teaching teaches the teacher, and (3) instant feedback improves learning. We show how to apply these generalizations via innovations in modern information technology inside, outside, and across university classrooms. We also give concrete examples of these ideas from innovations we have experimented with in our own teaching. See also a video presentation of this talk before the Harvard Board of Overseers}, author = {Gary King and Maya Sen} } @article {37219, title = {The Troubled Future of Colleges and Universities (with comments from five scholar-administrators)}, journal = {PS: Political Science and Politics}, volume = {46}, number = {1}, year = {2013}, pages = {81--113}, abstract = {The American system of higher education is under attack by political, economic, and educational forces that threaten to undermine its business model, governmental support, and operating mission. The potential changes are considerably more dramatic and disruptive than what we{\textquoteright}ve already experienced. Traditional colleges and universities urgently need a coherent, thought-out response. Their central role in ensuring the creation, preservation, and distribution of knowledge may be at risk and, as a consequence, so too may be the spectacular progress across fields we have come to expect as a result. Symposium contributors include Henry E. Brady, John Mark Hansen, Gary King, Nannerl O. Keohane, Michael Laver, Virginia Sapiro, and Maya Sen.}, author = {Gary King and Maya Sen} } @article {IacKinPor09, title = {Causal Inference Without Balance Checking: Coarsened Exact Matching}, journal = {Political Analysis}, volume = {20}, number = {1}, year = {2012}, pages = {1--24}, abstract = { We discuss a method for improving causal inferences called "Coarsened Exact Matching{\textquoteright}{\textquoteright} (CEM), and the new "Monotonic Imbalance Bounding{\textquoteright}{\textquoteright} (MIB) class of matching methods from which CEM is derived. We summarize what is known about CEM and MIB, derive and illustrate several new desirable statistical properties of CEM, and then propose a variety of useful extensions. 
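The coarsen, exact match, and reweight logic behind CEM can be illustrated in a few lines of base R; this is a simplified sketch with simulated data, not the open source cem software for R and Stata mentioned just below.

# 1. Simulate data with a confounded binary treatment.
set.seed(3)
n <- 1000
age <- round(runif(n, 18, 65))
educ <- sample(6:20, n, replace = TRUE)
treated <- rbinom(n, 1, plogis(-2 + 0.03 * age))
y <- 1 + 2 * treated + 0.05 * age + 0.1 * educ + rnorm(n)
d <- data.frame(y, treated, age, educ)
# 2. Temporarily coarsen the covariates into substantively meaningful bins
#    and exact match on the resulting strata.
d$stratum <- interaction(cut(d$age, c(17, 30, 45, 65)),
                         cut(d$educ, c(5, 12, 16, 20)), drop = TRUE)
keep <- ave(d$treated, d$stratum, FUN = function(t) any(t == 1) && any(t == 0)) == 1
m <- d[keep, ]
# 3. Weight controls to the treated distribution across strata and analyze
#    the original (uncoarsened) matched data.
tab <- with(m, table(stratum, treated))
m$w <- ifelse(m$treated == 1, 1,
              tab[as.character(m$stratum), "1"] / tab[as.character(m$stratum), "0"])
coef(lm(y ~ treated + age + educ, data = m, weights = w))["treated"]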
We show that CEM possesses a wide range of desirable statistical properties not available in most other matching methods, but is at the same time exceptionally easy to comprehend and use. We focus on the connection between theoretical properties and practical applications. We also make available easy-to-use open source software for R and Stata which implements all our suggestions. See also:\ \ An Explanation of CEM Weights }, url = {https://www.cambridge.org/core/journals/political-analysis/article/causal-inference-without-balance-checking-coarsened-exact-matching/5ABCF5B3FC3089A87FD59CECBB3465C0}, author = {Stefano M. Iacus and Gary King and Giuseppe Porro} } @article {36577, title = {Estimating Partisan Bias of the Electoral College Under Proposed Changes in Elector Apportionment}, journal = {Statistics, Politics, and Policy}, year = {2012}, pages = {1-13}, abstract = {In the election for President of the United States, the Electoral College is the body whose members vote to elect the President directly. Each state sends a number of delegates equal to its total number of representatives and senators in Congress; all but two states (Nebraska and Maine) assign electors pledged to the candidate that wins the state{\textquoteright}s plurality vote. We investigate the effect on presidential elections if states were to assign their electoral votes according to results in each congressional district, and conclude that the direct popular vote and the current electoral college are both substantially fairer than those alternatives where states would have divided their electoral votes by congressional district. }, url = {http://www.degruyter.com/view/j/spp.ahead-of-print/spp-2012-0001/spp-2012-0001.xml?format=INT}, author = {A. C. Thomas and Andrew Gelman and Gary King and Jonathan N. Katz} } @article {27566, title = {Letter to the Editor on the "Medicare Health Support Pilot Program" (by McCall and Cromwell)}, journal = {New England Journal of Medicine}, volume = {366}, number = {7}, year = {2012}, pages = {667}, url = {http://www.nejm.org/doi/full/10.1056/NEJMc1114006}, author = {Gary King and Richard Nielsen and Aaron Wells} } @article {9075, title = {Statistical Security for Social Security}, journal = {Demography}, volume = {49}, number = {3}, year = {2012}, pages = {1037-1060}, abstract = {The financial viability of Social Security, the single largest U.S. Government program, depends on accurate forecasts of the solvency of its intergenerational trust fund. We begin by detailing information necessary for replicating the Social Security Administration{\textquoteright}s (SSA{\textquoteright}s) forecasting procedures, which until now has been unavailable in the public domain. We then offer a way to improve the quality of these procedures via age- and sex-specific mortality forecasts. The most recent SSA mortality forecasts were based on the best available technology at the time, which was a combination of linear extrapolation and qualitative judgments. Unfortunately, linear extrapolation excludes known risk factors and is inconsistent with long-standing demographic patterns such as the smoothness of age profiles. Modern statistical methods typically outperform even the best qualitative judgments in these contexts. We show how to use such methods here, enabling researchers to forecast using far more information, such as the known risk factors of smoking and obesity and known demographic patterns.
Including this extra information makes a substantial difference: For example, by only improving mortality forecasting methods, we predict three fewer years of net surplus, $730 billion less in Social Security trust funds, and program costs that are greater by 0.66\% of projected taxable payroll compared to SSA projections by 2031. More important than specific numerical estimates are the advantages of transparency, replicability, reduction of uncertainty, and what may be the resulting lower vulnerability to the politicization of program forecasts. In addition, by offering with this paper software and detailed replication information, we hope to marshal the efforts of the research community to include ever more informative inputs and to continue to reduce the uncertainties in Social Security forecasts. This work builds on our article that provides forecasts of US Mortality rates (see King and Soneji,\ The Future of Death in America), a book developing improved methods for forecasting mortality (Girosi and King, Demographic Forecasting), all data we used (King and Soneji,\ replication data sets), and open source software that implements the methods (Girosi and King, YourCast). \ Also available is a\ New York Times Op-Ed based on this work (King and Soneji, Social Security: It{\textquoteright}s Worse Than You Think), and a replication data set for the Op-Ed (King and Soneji, replication data set).}, url = {http://link.springer.com/article/10.1007\%2Fs13524-012-0106-z}, author = {Samir Soneji and Gary King} } @article {24602, title = {Amelia II: A Program for Missing Data}, journal = {Journal of Statistical Software}, volume = {45}, number = {7}, year = {2011}, pages = {1-47}, abstract = {Amelia II is a complete R package for multiple imputation of missing data. The package implements a new expectation-maximization with bootstrapping algorithm that works faster, with larger numbers of variables, and is far easier to use, than various Markov chain Monte Carlo approaches, but gives essentially the same answers. The program also improves imputation models by allowing researchers to put Bayesian priors on individual cell values, thereby including a great deal of potentially valuable and extensive information. It also includes features to accurately impute cross-sectional datasets, individual time series, or sets of time series for different cross-sections. A full set of graphical diagnostics is also available. The program is easy to use, and the simplicity of the algorithm makes it far more robust; both a simple command line and extensive graphical user interface are included. Amelia II software web site }, author = {James Honaker and Gary King and Matthew Blackwell} } @article {WanKinLau07, title = {Anchors: Software for Anchoring Vignettes Data}, journal = {Journal of Statistical Software}, volume = {42}, number = {3}, year = {2011}, pages = {1--25}, abstract = {When respondents use the ordinal response categories of standard survey questions in different ways, the validity of analyses based on the resulting data can be biased. Anchoring vignettes is a survey design technique intended to correct for some of these problems.
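The core nonparametric idea can be sketched in a few lines of base R: each respondent's self-assessment is recoded relative to how that same respondent rated the hypothetical people in the vignettes. This is an illustration only, with invented responses; the anchors package described in this entry implements the full set of methods, including ties and censored ranks.

# Recode a self-assessment relative to two vignette ratings (v1 milder than v2):
# 1 = below both vignettes, 3 = between them, 5 = above both; 2 and 4 are ties.
vignette_recode <- function(self, v1, v2) {
  ifelse(self < v1, 1,
  ifelse(self == v1, 2,
  ifelse(self < v2, 3,
  ifelse(self == v2, 4, 5))))
}
# Two respondents give the same self-rating but use the scale differently;
# after anchoring, their answers are no longer treated as equivalent.
self <- c(3, 3)
v1 <- c(2, 4)
v2 <- c(4, 5)
vignette_recode(self, v1, v2)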
The anchors package in R includes methods for evaluating and choosing anchoring vignettes, and for analyzing the resulting data.}, url = {http://www.jstatsoft.org/v42/i03/}, author = {Jonathan Wand and Gary King and Olivia Lau} } @article {6824, title = {Avoiding Randomization Failure in Program Evaluation}, journal = {Population Health Management}, volume = {14}, number = {1}, year = {2011}, month = {2011}, pages = {S11-S22}, abstract = {We highlight common problems in the application of random treatment assignment in large scale program evaluation. Random assignment is the defining feature of modern experimental design. Yet, errors in design, implementation, and analysis often result in real world applications not benefiting from the advantages of randomization. The errors we highlight cover the control of variability, levels of randomization, size of treatment arms, and power to detect causal effects, as well as the many problems that commonly lead to post-treatment bias. We illustrate with an application to the Medicare Health Support evaluation, including recommendations for improving the design and analysis of this and other large scale randomized experiments.}, author = {Gary King and Richard Nielsen and Carter Coberley and James E. Pope and Aaron Wells} } @article {7321, title = {Ensuring the Data Rich Future of the Social Sciences}, journal = {Science}, volume = {331}, number = {11 February}, year = {2011}, month = {2011}, pages = {719-721}, abstract = {Massive increases in the availability of informative social science data are making dramatic progress possible in analyzing, understanding, and addressing many major societal problems. Yet the same forces pose severe challenges to the scientific infrastructure supporting data sharing, data management, informatics, statistical methodology, and research ethics and policy, and these are collectively holding back progress. I address these changes and challenges and suggest what can be done.}, author = {Gary King} } @article {24356, title = {Estimating Incidence Curves of Several Infections Using Symptom Surveillance Data}, journal = {PLoS ONE}, volume = {6}, number = {8}, year = {2011}, pages = {e23380}, abstract = {We introduce a method for estimating incidence curves of several co-circulating infectious pathogens, where each infection has its own probabilities of particular symptom profiles. Our deconvolution method utilizes weekly surveillance data on symptoms from a defined population as well as additional data on symptoms from a sample of virologically confirmed infectious episodes. We illustrate this method by numerical simulations and by using data from a survey conducted on the University of Michigan campus. Last, we describe the data needs to make such estimates accurate. Link to PLoS version }, author = {Edward Goldstein and Benjamin J. Cowling and Allison E. Aiello and Saki Takahashi and Gary King and Ying Lu and Marc Lipsitch} } @article {KinSon09, title = {The Future of Death in America}, journal = {Demographic Research}, volume = {25}, number = {1}, year = {2011}, pages = {1--38}, abstract = {Population mortality forecasts are widely used for allocating public health expenditures, setting research priorities, and evaluating the viability of public pensions, private pensions, and health care financing systems. 
In part because existing methods seem to forecast worse when based on more information, most forecasts are still based on simple linear extrapolations that ignore known biological risk factors and other prior information. We adapt a Bayesian hierarchical forecasting model capable of including more known health and demographic information than has previously been possible. This leads to the first age- and sex-specific forecasts of American mortality that simultaneously incorporate, in a formal statistical model, the effects of the recent rapid increase in obesity, the steady decline in tobacco consumption, and the well known patterns of smooth mortality age profiles and time trends. Formally including new information in forecasts can matter a great deal. For example, we estimate an increase in male life expectancy at birth from 76.2 years in 2010 to 79.9 years in 2030, which is 1.8 years greater than the U.S. Social Security Administration projection and 1.5 years more than U.S. Census projection. For females, we estimate more modest gains in life expectancy at birth over the next twenty years from 80.5 years to 81.9 years, which is virtually identical to the Social Security Administration projection and 2.0 years less than U.S. Census projections. We show that these patterns are also likely to greatly affect the aging American population structure. We offer an easy-to-use approach so that researchers can include other sources of information and potentially improve on our forecasts too.}, url = {http://www.demographic-research.org/volumes/vol25/1/}, author = {Gary King and Samir Soneji} } @article {6876, title = {General Purpose Computer-Assisted Clustering and Conceptualization}, journal = {Proceedings of the National Academy of Sciences}, year = {2011}, abstract = {We develop a computer-assisted method for the discovery of insightful conceptualizations, in the form of clusterings (i.e., partitions) of input objects. Each of the numerous fully automated methods of cluster analysis proposed in statistics, computer science, and biology optimize a different objective function. Almost all are well defined, but how to determine before the fact which one, if any, will partition a given set of objects in an "insightful" or "useful" way for a given user is unknown and difficult, if not logically impossible. We develop a metric space of partitions from all existing cluster analysis methods applied to a given data set (along with millions of other solutions we add based on combinations of existing clusterings), and enable a user to explore and interact with it, and quickly reveal or prompt useful or insightful conceptualizations. In addition, although uncommon in unsupervised learning problems, we offer and implement evaluation designs that make our computer-assisted approach vulnerable to being proven suboptimal in specific data types. 
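As a toy illustration of what a space of clusterings looks like, the base-R sketch below runs several standard methods on the same data and measures how far apart the resulting partitions are; it is a simplified stand-in for the approach in this entry, not the authors' software.

# Run several off-the-shelf clusterings of the same data.
set.seed(1)
X <- scale(iris[, 1:4])
clusterings <- list(
  kmeans3 = kmeans(X, centers = 3)$cluster,
  kmeans4 = kmeans(X, centers = 4)$cluster,
  ward = cutree(hclust(dist(X), method = "ward.D2"), k = 3),
  complete = cutree(hclust(dist(X), method = "complete"), k = 3)
)
# Distance between two partitions: share of object pairs on which they
# disagree about whether the pair is co-clustered.
partition_dist <- function(a, b) {
  ca <- outer(a, a, "==")
  cb <- outer(b, b, "==")
  mean(ca[upper.tri(ca)] != cb[upper.tri(cb)])
}
D <- sapply(clusterings, function(a) sapply(clusterings, function(b) partition_dist(a, b)))
round(D, 2)   # pairwise distances among the clusterings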
We demonstrate that our approach facilitates more efficient and insightful discovery of useful information than either expert human coders or many existing fully automated methods.}, url = {http://www.pnas.org/content/early/2011/01/31/1018067108.abstract}, author = {Justin Grimmer and Gary King} } @article {HoImaKin07a, title = {MatchIt: Nonparametric Preprocessing for Parametric Causal Inference}, journal = {Journal of Statistical Software}, volume = {42}, number = {8}, year = {2011}, note = {See also MatchIt Software}, pages = {1--28}, abstract = {MatchIt implements the suggestions of Ho, Imai, King, and Stuart (2007) for improving parametric statistical models by preprocessing data with nonparametric matching methods. MatchIt implements a wide range of sophisticated matching methods, making it possible to greatly reduce the dependence of causal inferences on hard-to-justify, but commonly made, statistical modeling assumptions. The software also easily fits into existing research practices since, after preprocessing data with MatchIt, researchers can use whatever parametric model they would have used without MatchIt, but produce inferences with substantially more robustness and less sensitivity to modeling assumptions. MatchIt is an R program, and also works seamlessly with Zelig.}, url = {https://www.jstatsoft.org/article/view/v042i08}, author = {Daniel E. Ho and Kosuke Imai and Gary King and Elizabeth A. Stuart} } @article {IacKinPor09a, title = {Multivariate Matching Methods That are Monotonic Imbalance Bounding}, journal = {Journal of the American Statistical Association}, volume = {106}, number = {493}, year = {2011}, month = {2011}, pages = {345-361}, abstract = {We introduce a new "Monotonic Imbalance Bounding" (MIB) class of matching methods for causal inference with a surprisingly large number of attractive statistical properties. MIB generalizes and extends in several new directions the only existing class, "Equal Percent Bias Reducing" (EPBR), which is designed to satisfy weaker properties and only in expectation. We also offer strategies to obtain specific members of the MIB class, and analyze in more detail a member of this class, called Coarsened Exact Matching, whose properties we analyze from this new perspective. We offer a variety of analytical results and numerical simulations that demonstrate how members of the MIB class can dramatically improve inferences relative to EPBR-based matching methods.}, author = {Stefano M. Iacus and Gary King and Giuseppe Porro} } @article {SteKinShi, title = {Deaths From Heart Failure: Using Coarsened Exact Matching to Correct Cause of Death Statistics}, journal = {Population Health Metrics}, volume = {8}, number = {6}, year = {2010}, abstract = {Background: Incomplete information on death certificates makes recorded cause of death data less useful for public health monitoring and planning. Certifying physicians sometimes list only the mode of death (and in particular, list heart failure) without indicating the underlying disease(s) that gave rise to the death. This can prevent valid epidemiologic comparisons across countries and over time. Methods and Results: We propose that coarsened exact matching be used to infer the underlying causes of death where only the mode of death is known; we focus on the case of heart failure in U.S., Mexican and Brazilian death records. 
Redistribution algorithms derived using this method assign the largest proportion of heart failure deaths to ischemic heart disease in all three countries (53\%, 26\% and 22\%), with larger proportions assigned to hypertensive heart disease and diabetes in Mexico and Brazil (16\% and 23\% vs. 7\% for hypertensive heart disease and 13\% and 9\% vs. 6\% for diabetes). Reassigning these heart failure deaths increases US ischemic heart disease mortality rates by 6\%. Conclusions: The frequency with which physicians list heart failure in the causal chain for various underlying causes of death allows for inference about how physicians use heart failure on the death certificate in different settings. This easy-to-use method has the potential to reduce bias and increase comparability in cause-of-death data, thereby improving the public health utility of death records. Key Words: vital statistics, heart failure, population health, mortality, epidemiology}, author = {Gretchen Stevens and Gary King and Kenji Shibuya} } @article {6367, title = {Designing Verbal Autopsy Studies}, journal = {Population Health Metrics}, volume = {8}, number = {19}, year = {2010}, abstract = {Background: Verbal autopsy analyses are widely used for estimating cause-specific mortality rates (CSMR) in the vast majority of the world without high quality medical death registration. Verbal autopsies -- survey interviews with the caretakers of imminent decedents -- stand in for medical examinations or physical autopsies, which are infeasible or culturally prohibited. Methods and Findings: We introduce methods, simulations, and interpretations that can improve the design of automated, data-derived estimates of CSMRs, building on a new approach by King and Lu (2008). Our results generate advice for choosing symptom questions and sample sizes that is easier to satisfy than existing practices. For example, most prior effort has been devoted to searching for symptoms with high sensitivity and specificity, which has rarely if ever succeeded with multiple causes of death. In contrast, our approach makes this search irrelevant because it can produce unbiased estimates even with symptoms that have very low sensitivity and specificity. In addition, the new method is optimized for survey questions caretakers can easily answer rather than questions physicians would ask themselves. We also offer an automated method of weeding out biased symptom questions and advice on how to choose the number of causes of death, symptom questions to ask, and observations to collect, among others. Conclusions: With the advice offered here, researchers should be able to design verbal autopsy surveys and conduct analyses with greatly reduced statistical biases and research costs. }, author = {Gary King and Ying Lu and Kenji Shibuya} } @article {HopKin09b, title = {Improving Anchoring Vignettes: Designing Surveys to Correct Interpersonal Incomparability}, journal = {Public Opinion Quarterly}, year = {2010}, pages = {1-22}, abstract = {We report the results of several randomized survey experiments designed to evaluate two intended improvements to anchoring vignettes, an increasingly common technique used to achieve interpersonal comparability in survey research.\  This technique asks for respondent self-assessments followed by assessments of hypothetical people described in vignettes. Variation in assessments of the vignettes across respondents reveals interpersonal incomparability and allows researchers to make responses more comparable by rescaling them.
Our experiments show, first, that switching the question order so that self-assessments follow the vignettes primes respondents to define the response scale in a common way.\  In this case, priming is not a bias to avoid but a means of better communicating the question{\textquoteright}s meaning.\  We then demonstrate that combining vignettes and self-assessments in a single direct comparison induces inconsistent and less informative responses.\  Since similar combined strategies are widely employed for related purposes, our results indicate that anchoring vignettes could reduce measurement error in many applications where they are not currently used.\  Data for our experiments come from a national telephone survey and a separate on-line survey.}, author = {Daniel Hopkins and Gary King} } @article {HopKin10, title = {A Method of Automated Nonparametric Content Analysis for Social Science}, journal = {American Journal of Political Science}, volume = {54}, number = {1}, year = {2010}, month = {01/2010}, pages = {229{\textendash}247}, abstract = {The increasing availability of digitized text presents enormous opportunities for social scientists. Yet hand coding many blogs, speeches, government records, newspapers, or other sources of unstructured text is infeasible. Although computer scientists have methods for automated content analysis, most are optimized to classify individual documents, whereas social scientists instead want generalizations about the population of documents, such as the proportion in a given category. Unfortunately, even a method with a high percent of individual documents correctly classified can be hugely biased when estimating category proportions. By directly optimizing for this social science goal, we develop a method that gives approximately unbiased estimates of category proportions even when the optimal classifier performs poorly. We illustrate with diverse data sets, including the daily expressed opinions of thousands of people about the U.S. presidency. We also make available software that implements our methods and large corpora of text for further analysis. This article led to the formation of\ Crimson Hexagon}, author = {Daniel Hopkins and Gary King} } @article {HonKin10, title = {What to do About Missing Values in Time Series Cross-Section Data}, journal = {American Journal of Political Science}, volume = {54}, number = {3}, year = {2010}, month = {2010}, pages = {561-581}, abstract = {Applications of modern methods for analyzing data with missing values, based primarily on multiple imputation, have in the last half-decade become common in American politics and political behavior. Scholars in these fields have thus increasingly avoided the biases and inefficiencies caused by ad hoc methods like listwise deletion and best guess imputation. However, researchers in much of comparative politics and international relations, and others with similar data, have been unable to do the same because the best available imputation methods work poorly with the time-series cross-section data structures common in these fields. We attempt to rectify this situation. First, we build a multiple imputation model that allows smooth time trends, shifts across cross-sectional units, and correlations over time and space, resulting in far more accurate imputations. Second, we build nonignorable missingness models by enabling analysts to incorporate knowledge from area studies experts via priors on individual missing cell values, rather than on difficult-to-interpret model parameters. 
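A hedged sketch of this workflow in R, assuming the Amelia package's amelia() interface with its ts and cs arguments, time polynomials, and observation-level priors; the data set and the prior below are invented for illustration.

library(Amelia)
set.seed(4)
d <- expand.grid(country = paste0("C", 1:10), year = 1980:2000)
d$gdp <- rnorm(nrow(d), 100 + 2 * (d$year - 1980), 10)
d$trade <- rnorm(nrow(d), 50, 5)
miss <- sample(nrow(d), 30)
d$gdp[miss] <- NA                       # introduce missingness
d$trade[sample(nrow(d), 30)] <- NA
# A purely hypothetical expert prior for one missing cell of "gdp":
# row index, column index, prior mean, prior standard deviation.
pr <- matrix(c(miss[1], which(names(d) == "gdp"), 105, 10), nrow = 1)
a.out <- amelia(d, m = 5, ts = "year", cs = "country", polytime = 2, priors = pr)
summary(a.out)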
Third, since these tasks could not be accomplished within existing imputation algorithms, in that they cannot handle as many variables as needed even in the simpler cross-sectional data for which they were designed, we also develop a new algorithm that substantially expands the range of computationally feasible data types and sizes for which multiple imputation can be used. These developments also made it possible to implement the methods introduced here in freely available open source software that is considerably more reliable than existing strategies.}, url = {http://gking.harvard.edu/files/abs/pr-abs.shtml}, author = {James Honaker and Gary King} } @article {BlaIacKinPor09, title = {CEM: Coarsened Exact Matching in Stata}, journal = {The Stata Journal}, volume = {9}, year = {2009}, pages = {524{\textendash}546}, abstract = {In this article, we introduce a Stata implementation of coarsened exact matching, a new method for improving the estimation of causal effects by reducing imbalance in covariates between treated and control groups. Coarsened exact matching is faster, is easier to use and understand, requires fewer assumptions, is more easily automated, and possesses more attractive statistical properties for many applications than do existing matching methods. In coarsened exact matching, users temporarily coarsen their data, exact match on these coarsened data, and then run their analysis on the uncoarsened, matched data. Coarsened exact matching bounds the degree of model dependence and causal effect estimation error by ex ante user choice, is monotonic imbalance bounding (so that reducing the maximum imbalance on one variable has no effect on others), does not require a separate procedure to restrict data to common support, meets the congruence principle, is approximately invariant to measurement error, balances all nonlinearities and interactions in sample (i.e., not merely in expectation), and works with multiply imputed datasets. Other matching methods inherit many of the coarsened exact matching method{\textquoteright}s properties when applied to further match data preprocessed by coarsened exact matching. The cem command implements the coarsened exact matching algorithm in Stata.}, author = {Matthew Blackwell and Stefano Iacus and Gary King and Giuseppe Porro} } @article {IacKinPor09b, title = {CEM: Software for Coarsened Exact Matching}, journal = {Journal of Statistical Software}, volume = {30}, year = {2009}, abstract = {This program is designed to improve causal inference via a method of matching that is widely applicable in observational data and easy to understand and use (if you understand how to draw a histogram, you will understand this method). The program implements the coarsened exact matching (CEM) algorithm, described below. CEM may be used alone or in combination with any existing matching method. This algorithm, and its statistical properties, are described in Iacus, King, and Porro (2008).}, url = {http://gking.harvard.edu/cem}, author = {Stefano M. 
Iacus and Gary King and Giuseppe Porro} } @article {LazPenAda09, title = {Computational Social Science}, journal = {Science}, volume = {323}, year = {2009}, pages = {721-723}, abstract = {A field is emerging that leverages the capacity to collect and analyze data at a scale that may reveal patterns of individual and group behaviors.}, author = {Lazer, David and Pentland, Alex and Adamic, Lada and Aral, Sinan and Barabasi, Albert-Laszlo and Brewer, Devon and Christakis, Nicholas and Contractor, Noshir and Fowler, James and Myron Gutmann and Jebara, Tony and Gary King and Macy, Michael and Roy, Deb and Van Alstyne, Marshall} } @article {25141, title = {Empirical versus Theoretical Claims about Extreme Counterfactuals: A Response}, journal = {Political Analysis}, volume = {17}, year = {2009}, pages = {107-112}, abstract = {In response to the data-based measures of model dependence proposed in King and Zeng (2006), Sambanis and Michaelides (2008) propose alternative measures that rely upon assumptions untestable in observational data. If these assumptions are correct, then their measures are appropriate and ours, based solely on the empirical data, may be too conservative. If instead, as is usually the case, the researcher is not certain of the precise functional form of the data generating process, the distribution from which the data are drawn, and the applicability of these modeling assumptions to new counterfactuals, then the data-based measures proposed in King and Zeng (2006) are much preferred. After all, the point of model dependence checks is to verify empirically, rather than to stipulate by assumption, the effects of modeling assumptions on counterfactual inferences. }, author = {Gary King and Langche Zeng} } @article {ImaKinNal09, title = {The Essential Role of Pair Matching in Cluster-Randomized Experiments, with Application to the Mexican Universal Health Insurance Evaluation}, journal = {Statistical Science}, volume = {24}, year = {2009}, pages = {29{\textendash}53}, abstract = {A basic feature of many field experiments is that investigators are only able to randomize clusters of individuals{\textemdash}such as households, communities, firms, medical practices, schools, or classrooms{\textemdash}even when the individual is the unit of interest. To recoup the resulting efficiency loss, some studies pair similar clusters and randomize treatment within pairs. However, many other studies avoid pairing, in part because of claims in the literature, echoed by clinical trials standards organizations, that this matched-pair, cluster-randomization design has serious problems. We argue that all such claims are unfounded. We also prove that the estimator recommended for this design in the literature is unbiased only in situations when matching is unnecessary, and that its standard error is also invalid. To overcome this problem without modeling assumptions, we develop a simple design-based estimator with much improved statistical properties. We also propose a model-based approach that includes some of the benefits of our design-based estimator as well as the estimator in the literature. Our methods also address individual-level noncompliance, which is common in applications but not allowed for in most existing methods.
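A self-contained sketch of the simplest version of such a design-based analysis, treating within-pair differences in cluster means as the unit of analysis (an illustration in the spirit of the design, with simulated data, and not necessarily the exact estimator derived in the article):

# Simulate 50 matched pairs of clusters; randomize treatment within each pair.
set.seed(5)
n_pairs <- 50
pair <- rep(1:n_pairs, each = 2)
treat <- as.vector(replicate(n_pairs, sample(c(0, 1))))
ybar <- 10 + 2 * treat + rnorm(2 * n_pairs)      # cluster-level mean outcomes
d <- data.frame(pair, treat, ybar)
# Within-pair differences (treated minus control cluster mean), then a paired analysis.
diffs <- sapply(split(d, d$pair), function(p) p$ybar[p$treat == 1] - p$ybar[p$treat == 0])
t.test(diffs)   # average pair difference with its uncertainty
# Weighting pairs by their cluster population sizes is a natural refinement.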
We show that from the perspective of bias, efficiency, power, robustness, or research costs, and in large or small samples, pairing should be used in cluster-randomized experiments whenever feasible and failing to do so is equivalent to discarding a considerable fraction of one{\textquoteright}s data. We develop these techniques in the context of a randomized evaluation we are conducting of the Mexican Universal Health Insurance Program.}, author = {Kosuke Imai and Gary King and Clayton Nall} } @article {ImaKinNal09d, title = {Matched Pairs and the Future of Cluster-Randomized Experiments: A Rejoinder}, journal = {Statistical Science}, volume = {24}, year = {2009}, pages = {64{\textendash}72}, abstract = {A basic feature of many field experiments is that investigators are only able to randomize clusters of individuals{\textemdash}such as households, communities, firms, medical practices, schools, or classrooms{\textemdash}even when the individual is the unit of interest. To recoup the resulting efficiency loss, some studies pair similar clusters and randomize treatment within pairs. However, many other studies avoid pairing, in part because of claims in the literature, echoed by clinical trials standards organizations, that this matched-pair, cluster-randomization design has serious problems. We argue that all such claims are unfounded. We also prove that the estimator recommended for this design in the literature is unbiased only in situations when matching is unnecessary, and that its standard error is also invalid. To overcome this problem without modeling assumptions, we develop a simple design-based estimator with much improved statistical properties. We also propose a model-based approach that includes some of the benefits of our design-based estimator as well as the estimator in the literature. Our methods also address individual-level noncompliance, which is common in applications but not allowed for in most existing methods. We show that from the perspective of bias, efficiency, power, robustness, or research costs, and in large or small samples, pairing should be used in cluster-randomized experiments whenever feasible and failing to do so is equivalent to discarding a considerable fraction of one{\textquoteright}s data. We develop these techniques in the context of a randomized evaluation we are conducting of the Mexican Universal Health Insurance Program.}, author = {Kosuke Imai and Gary King and Clayton Nall} } @article {MyrAbrAdaAltArmBolCarCraDonKinLylPieRocRocYou09, title = {From Preserving the Past to Preserving the Future: The Data-PASS Project and the Challenges of Preserving Digital Social Science Data}, journal = {Library Trends}, volume = {57}, year = {2009}, pages = {315{\textendash}337}, abstract = {Social science data are an unusual part of the past, present, and future of digital preservation. They are both an unqualified success, due to long-lived and sustainable archival organizations, and in need of further development because not all digital content is being preserved. This article is about the Data Preservation Alliance for Social Sciences (Data-PASS), a project supported by the National Digital Information Infrastructure and Preservation Program (NDIIPP), which is a partnership of five major U.S. social science data archives.
Broadly speaking, Data-PASS has the goal of ensuring that at-risk social science data are identified, acquired, and preserved, and that we have a future-oriented organization that could collaborate on those preservation tasks for the future. Throughout the life of the Data-PASS project we have worked to identify digital materials that have never been systematically archived, and to appraise and acquire them. As the project has progressed, however, it has increasingly turned its attention from identifying and acquiring legacy and at-risk social science data to identifying ongoing and future research projects that will produce data. This article is about the project{\textquoteright}s history, with an emphasis on the issues that underlay the transition from looking backward to looking forward.}, author = {Gutmann, Myron P. and Mark Abrahamson and Margaret O. Adams and Micah Altman and Caroline Arms and Kenneth Bollen and Michael Carlson and Jonathan Crabtree and Darrell Donakowski and Gary King and Jaret Lyle and Marc Maynard and Amy Pienta and Richard Rockwell and Lois Rocms-Ferrara and Copeland H. Young} } @article {AbrBolGut09, title = {Preserving Quantitative Research-Elicited Data for Longitudinal Analysis. New Developments in Archiving Survey Data in the U.S.}, journal = {Historical Social Research}, volume = {34}, number = {3}, year = {2009}, pages = {51-59}, abstract = {Social science data collected in the United States, both historically and at present, have often not been placed in any public archive -- even when the data collection was supported by government grants. The availability of the data for future use is, therefore, in jeopardy. Enforcing archiving norms may be the only way to increase data preservation and availability in the future. }, author = {Mark Abrahamson and Kenneth A. Bollen and Gutmann, Myron P. and Gary King and Amy Pienta} } @article {KinGakIma09, title = {Public Policy for the Poor? A Randomised Assessment of the Mexican Universal Health Insurance Programme}, journal = {The Lancet}, volume = {373}, year = {2009}, month = {04/2009}, pages = {1447-1454}, abstract = {Background: We assessed aspects of Seguro Popular, a programme aimed to deliver health insurance, regular and preventive medical care, medicines, and health facilities to 50 million uninsured Mexicans. Methods: We randomly assigned treatment within 74 matched pairs of health clusters{\textemdash}i.e., health facility catchment areas{\textemdash}representing 118,569 households in seven Mexican states, and measured outcomes in a 2005 baseline survey (August 2005, to September 2005) and follow-up survey 10 months later (July 2006, to August 2006) in 50 pairs (n=32 515). The treatment consisted of encouragement to enrol in a health-insurance programme and upgraded medical facilities. Participant states also received funds to improve health facilities and to provide medications for services in treated clusters. We estimated intention to treat and complier average causal effects non-parametrically. Findings: Intention-to-treat estimates indicated a 23\% reduction from baseline in catastrophic expenditures (1{\textperiodcentered}9\% points and 95\% CI 0{\textperiodcentered}14-3{\textperiodcentered}66). The effect in poor households was 3{\textperiodcentered}0\% points (0{\textperiodcentered}46-5{\textperiodcentered}54) and in experimental compliers was 6{\textperiodcentered}5\% points (1{\textperiodcentered}65-11{\textperiodcentered}28), 30\% and 59\% reductions, respectively.
The intention-to-treat effect on health spending in poor households was 426 pesos (39-812), and the complier average causal effect was 915 pesos (147-1684). Contrary to expectations and previous observational research, we found no effects on medication spending, health outcomes, or utilisation. Interpretation: Programme resources reached the poor. However, the programme did not show some other effects, possibly due to the short duration of treatment (10 months). Although Seguro Popular seems to be successful at this early stage, further experiments and follow-up studies, with longer assessment periods, are needed to ascertain the long-term effects of the programme.}, author = {Gary King and Emmanuela Gakidou and Kosuke Imai and Jason Lakin and Ryan T. Moore and Clayton Nall and Nirmala Ravishankar and Manett Vargas and Martha Mar{\'\i}a T{\'e}llez-Rojo and Juan Eugenio Hern{\'a}ndez {\'A}vila and Mauricio Hern{\'a}ndez {\'A}vila and H{\'e}ctor Hern{\'a}ndez Llamas} } @article {MurKin08, title = {The Effects of International Monetary Fund Loans on Health Outcomes}, journal = {PLoS Medicine}, volume = {5}, year = {2008}, month = {June}, abstract = {A "Perspective" article that discusses an article by David Stuckler and colleagues showing that, in Eastern European and former Soviet countries, participation in International Monetary Fund economic programs has been associated with higher mortality rates from tuberculosis.}, author = {Megan Murray and Gary King} } @article {5614, title = {The Future of Partisan Symmetry as a Judicial Test for Partisan Gerrymandering after LULAC v. Perry}, journal = {Election Law Journal}, volume = {6}, number = {1}, year = {2008}, month = {01/2007}, pages = {2-35}, chapter = {2}, abstract = {While the Supreme Court in Bandemer v. Davis found partisan gerrymandering to be justiciable, no challenged redistricting plan in the subsequent 20 years has been held unconstitutional on partisan grounds. Then, in Vieth v. Jubelirer, five justices concluded that some standard might be adopted in a future case, if a manageable rule could be found. When gerrymandering next came before the Court, in LULAC v. Perry, we, along with our colleagues, filed an Amicus Brief (King et al., 2005), proposing the test be based in part on the partisan symmetry standard. Although the issue was not resolved, our proposal was discussed and positively evaluated in three of the opinions, including the plurality judgment, and for the first time for any proposal the Court gave a clear indication that a future legal test for partisan gerrymandering will likely include partisan symmetry. A majority of Justices now appear to endorse the view that the measurement of partisan symmetry may be used in partisan gerrymandering claims as {\textquotedblleft}a helpful (though certainly not talismanic) tool{\textquotedblright} (Justice Stevens, joined by Justice Breyer), provided one recognizes that {\textquotedblleft}asymmetry alone is not a reliable measure of unconstitutional partisanship{\textquotedblright} and possibly that the standard would be applied only after at least one election has been held under the redistricting plan at issue (Justice Kennedy, joined by Justices Souter and Ginsburg).
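One common way to operationalize the partisan symmetry standard can be sketched in a few lines of R: apply a uniform swing so that each party in turn receives the same statewide vote share, then compare the seat shares the two parties would win. The district vote shares below are invented for illustration, and applied work adds estimation uncertainty rather than a single deterministic swing.

v <- c(0.35, 0.42, 0.48, 0.53, 0.61, 0.66, 0.70, 0.44)  # Democratic share by district
seats_at <- function(v, target_mean) {
  swing <- target_mean - mean(v)        # uniform partisan swing
  mean((v + swing) > 0.5)               # resulting Democratic seat share
}
dem_55 <- seats_at(v, 0.55)             # Democratic seat share with 55% of the vote
rep_55 <- 1 - seats_at(v, 0.45)         # Republican seat share with 55% of the vote
c(dem = dem_55, rep = rep_55, asymmetry = dem_55 - rep_55)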
We use this essay to respond to the request of Justices Souter and Ginsburg that {\textquotedblleft}further attention {\textellipsis} be devoted to the administrability of such a criterion at all levels of redistricting and its review.{\textquotedblright} Building on our previous scholarly work, our Amicus Brief, the observations of these five Justices, and a supporting consensus in the academic literature, we offer here a social science perspective on the conceptualization and measurement of partisan gerrymandering and the development of relevant legal rules based on what is effectively the Supreme Court{\textquoteright}s open invitation to lower courts to revisit these issues in the light of LULAC v. Perry.}, author = {Bernard Grofman and Gary King} } @article {ImaKinStu08, title = {Misunderstandings Among Experimentalists and Observationalists about Causal Inference}, journal = {Journal of the Royal Statistical Society, Series A}, volume = {171, part 2}, year = {2008}, pages = {481{\textendash}502}, abstract = {We attempt to clarify, and suggest how to avoid, several serious misunderstandings about and fallacies of causal inference in experimental and observational research. These issues concern some of the most basic advantages and disadvantages of each basic research design. Problems include improper use of hypothesis tests for covariate balance between the treated and control groups, and the consequences of using randomization, blocking before randomization, and matching after treatment assignment to achieve covariate balance. Applied researchers in a wide range of scientific disciplines seem to fall prey to one or more of these fallacies, and as a result make suboptimal design or analysis choices. To clarify these points, we derive a new four-part decomposition of the key estimation errors in making causal inferences. We then show how this decomposition can help scholars from different experimental and observational research traditions better understand each other{\textquoteright}s inferential problems and attempted solutions.}, author = {Kosuke Imai and Gary King and Elizabeth Stuart} } @article {6386, title = {Ordinary Economic Voting Behavior in the Extraordinary Election of Adolf Hitler}, journal = {Journal of Economic History}, volume = {68}, number = {4}, year = {2008}, month = {12/2008}, pages = {996}, chapter = {951}, abstract = { The enormous Nazi voting literature rarely builds on modern statistical or economic research. By adding these approaches, we find that the most widely accepted existing theories of this era cannot distinguish the Weimar elections from almost any others in any country. Via a retrospective voting account, we show that voters most hurt by the depression, and most likely to oppose the government, fall into separate groups with divergent interests. This explains why some turned to the Nazis and others turned away. The consequences of Hitler{\textquoteright}s election were extraordinary, but the voting behavior that led to it was not. }, author = {Gary King and Ori Rosen and Martin Tanner and Alexander Wagner} } @article {ImaKinLau07, title = {Toward A Common Framework for Statistical Analysis and Development}, journal = {Journal of Computational Graphics and Statistics}, volume = {17}, year = {2008}, pages = {1{\textendash}22}, abstract = {We describe some progress toward a common framework for statistical analysis and software development built on and within the R language, including R{\textquoteright}s numerous existing packages. 
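A hedged sketch of the unified estimate, set-covariates, and simulate workflow this entry describes, assuming the Zelig package's zelig(), setx(), and sim() functions; the data are simulated for illustration.

library(Zelig)
set.seed(6)
mydata <- data.frame(x1 = rnorm(200), x2 = rbinom(200, 1, 0.5))
mydata$y <- 1 + 0.5 * mydata$x1 - 0.3 * mydata$x2 + rnorm(200)
z.out <- zelig(y ~ x1 + x2, model = "ls", data = mydata)   # same syntax across many models
x.lo <- setx(z.out, x1 = quantile(mydata$x1, 0.25))
x.hi <- setx(z.out, x1 = quantile(mydata$x1, 0.75))
s.out <- sim(z.out, x = x.lo, x1 = x.hi)                   # simulated quantities of interest
summary(s.out)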
The framework we have developed offers a simple unified structure and syntax that can encompass a large fraction of statistical procedures already implemented in R, without requiring any changes in existing approaches. We conjecture that it can be used to encompass and present simply a vast majority of existing statistical methods, regardless of the theory of inference on which they are based, notation with which they were developed, and programming syntax with which they have been implemented. This development enabled us, and should enable others, to design statistical software with a single, simple, and unified user interface that helps overcome the conflicting notation, syntax, jargon, and statistical methods existing across the methods subfields of numerous academic disciplines. The approach also enables one to build a graphical user interface that automatically includes any method encompassed within the framework. We hope that the result of this line of research will greatly reduce the time from the creation of a new statistical innovation to its widespread use by applied researchers whether or not they use or program in R.}, author = {Kosuke Imai and Gary King and Olivia Lau} } @article {KinLu08, title = {Verbal Autopsy Methods with Multiple Causes of Death}, journal = {Statistical Science}, volume = {23}, year = {2008}, pages = {78{\textendash}91}, abstract = {Verbal autopsy procedures are widely used for estimating cause-specific mortality in areas without medical death certification. Data on symptoms reported by caregivers along with the cause of death are collected from a medical facility, and the cause-of-death distribution is estimated in the population where only symptom data are available. Current approaches analyze only one cause at a time, involve assumptions judged difficult or impossible to satisfy, and require expensive, time consuming, or unreliable physician reviews, expert algorithms, or parametric statistical models. By generalizing current approaches to analyze multiple causes, we show how most of the difficult assumptions underlying existing methods can be dropped. These generalizations also make physician review, expert algorithms, and parametric statistical assumptions unnecessary. With theoretical results, and empirical analyses in data from China and Tanzania, we illustrate the accuracy of this approach. While no method of analyzing verbal autopsy data, including the more computationally intensive approach offered here, can give accurate estimates in all circumstances, the procedure offered is conceptually simpler, less expensive, more general, as or more replicable, and easier to use in practice than existing approaches. We also show how our focus on estimating aggregate proportions, which are the quantities of primary interest in verbal autopsy studies, may also greatly reduce the assumptions necessary, and thus improve the performance of, many individual classifiers in this and other areas. As a companion to this paper, we also offer easy-to-use software that implements the methods discussed herein.}, author = {Gary King and Ying Lu} } @article {KinWan07, title = {Comparing Incomparable Survey Responses: New Tools for Anchoring Vignettes}, journal = {Political Analysis}, volume = {15}, year = {2007}, month = {Winter}, pages = {46-66}, abstract = { When respondents use the ordinal response categories of standard survey questions in different ways, the validity of analyses based on the resulting data can be biased. 
Anchoring vignettes is a survey design technique, introduced by King, Murray, Salomon, and Tandon (2004), intended to correct for some of these problems. We develop new methods both for evaluating and choosing anchoring vignettes, and for analyzing the resulting data. With surveys on a diverse range of topics in a range of countries, we illustrate how our proposed methods can improve the ability of anchoring vignettes to extract information from survey data, as well as saving in survey administration costs. }, author = {Gary King and Jonathan Wand} } @article {KinZen07b, title = {Detecting Model Dependence in Statistical Inference: A Response}, journal = {International Studies Quarterly}, volume = {51}, year = {2007}, month = {March}, pages = {231-241}, abstract = {Inferences about counterfactuals are essential for prediction, answering "what if" questions, and estimating causal effects. However, when the counterfactuals posed are too far from the data at hand, conclusions drawn from well-specified statistical analyses become based on speculation and convenient but indefensible model assumptions rather than empirical evidence. Unfortunately, standard statistical approaches assume the veracity of the model rather than revealing the degree of model-dependence, and so this problem can be hard to detect. We develop easy-to-apply methods to evaluate counterfactuals that do not require sensitivity testing over specified classes of models. If an analysis fails the tests we offer, then we know that substantive results are sensitive to at least some modeling choices that are not based on empirical evidence. We use these methods to evaluate the extensive scholarly literatures on the effects of changes in the degree of democracy in a country (on any dependent variable) and separate analyses of the effects of UN peacebuilding efforts. We find evidence that many scholars are inadvertently drawing conclusions based more on modeling hypotheses than on their data. For some research questions, history contains insufficient information to be our guide.}, author = {Gary King and Langche Zeng} } @article {King07, title = {An Introduction to the Dataverse Network as an Infrastructure for Data Sharing}, journal = {Sociological Methods and Research}, volume = {36}, year = {2007}, pages = {173{\textendash}199}, abstract = {We introduce a set of integrated developments in web application software, networking, data citation standards, and statistical methods designed to put some of the universe of data and data sharing practices on somewhat firmer ground. We have focused on social science data, but aspects of what we have developed may apply more widely. The idea is to facilitate the public distribution of persistent, authorized, and verifiable data, with powerful but easy-to-use technology, even when the data are confidential or proprietary. 
We intend to solve some of the sociological problems of data sharing via technological means, with the result intended to benefit both the scientific community and the sometimes apparently contradictory goals of individual researchers.}, author = {Gary King} } @article {HoImaKin07, title = {Matching as Nonparametric Preprocessing for Reducing Model Dependence in Parametric Causal Inference}, journal = {Political Analysis}, volume = {15}, year = {2007}, pages = {199{\textendash}236}, abstract = {Although published works rarely include causal estimates from more than a few model specifications, authors usually choose the presented estimates from numerous trial runs readers never see. Given the often large variation in estimates across choices of control variables, functional forms, and other modeling assumptions, how can researchers ensure that the few estimates presented are accurate or representative? How do readers know that publications are not merely demonstrations that it is possible to find a specification that fits the author{\textquoteright}s favorite hypothesis? And how do we evaluate or even define statistical properties like unbiasedness or mean squared error when no unique model or estimator even exists? Matching methods, which offer the promise of causal inference with fewer assumptions, constitute one possible way forward, but crucial results in this fast-growing methodological literature are often grossly misinterpreted. We explain how to avoid these misinterpretations and propose a unified approach that makes it possible for researchers to preprocess data with matching (such as with the easy-to-use software we offer) and then to apply the best parametric techniques they would have used anyway. This procedure makes parametric models produce more accurate and considerably less model-dependent causal inferences. }, author = {Daniel Ho and Kosuke Imai and Gary King and Elizabeth Stuart} } @article {AltKin07, title = {A Proposed Standard for the Scholarly Citation of Quantitative Data}, journal = {D-Lib Magazine}, volume = {13}, year = {2007}, month = {March / April}, abstract = {An essential aspect of science is a community of scholars cooperating and competing in the pursuit of common goals. A critical component of this community is the common language of and the universal standards for scholarly citation, credit attribution, and the location and retrieval of articles and books. We propose a similar universal standard for citing quantitative data that retains the advantages of print citations, adds other components made possible by, and needed due to, the digital form and systematic nature of quantitative data sets, and is consistent with most existing subfield-specific approaches. 
Although the digital library field includes numerous creative ideas, we limit ourselves to only those elements that appear ready for easy practical use by scientists, journal editors, publishers, librarians, and archivists.}, url = {http://www.dlib.org/dlib/march07/altman/03altman.html}, author = {Micah Altman and Gary King} } @article {KinGakRav07, title = {A "Politically Robust" Experimental Design for Public Policy Evaluation, with Application to the Mexican Universal Health Insurance Program}, journal = {Journal of Policy Analysis and Management}, volume = {26}, year = {2007}, pages = {479-506}, abstract = {We develop an approach to conducting large scale randomized public policy experiments intended to be more robust to the political interventions that have ruined some or all parts of many similar previous efforts. Our proposed design is insulated from selection bias in some circumstances even if we lose observations, our inferences can still be unbiased even if politics disrupts any two of the three steps in our analytical procedures, and other empirical checks are available to validate the overall design. We illustrate with a design and empirical validation of an evaluation of the Mexican Seguro Popular de Salud (Universal Health Insurance) program we are conducting. Seguro Popular, which is intended to grow to provide medical care, drugs, preventative services, and financial health protection to the 50 million Mexicans without health insurance, is one of the largest health reforms of any country in the last two decades. The evaluation is also large scale, constituting one of the largest policy experiments to date and what may be the largest randomized health policy experiment ever.}, author = {Gary King and Emmanuela Gakidou and Nirmala Ravishankar and Ryan T. Moore and Jason Lakin and Manett Vargas and Martha Mar{\'\i}a T{\'e}llez-Rojo and Juan Eugenio Hern{\'a}ndez {\'A}vila and Mauricio Hern{\'a}ndez {\'A}vila and H{\'e}ctor Hern{\'a}ndez Llamas} } @article {KinZen07, title = {When Can History Be Our Guide? The Pitfalls of Counterfactual Inference}, journal = {International Studies Quarterly}, year = {2007}, month = {March}, pages = {183-210}, abstract = {Inferences about counterfactuals are essential for prediction, answering "what if" questions, and estimating causal effects. However, when the counterfactuals posed are too far from the data at hand, conclusions drawn from well-specified statistical analyses become based on speculation and convenient but indefensible model assumptions rather than empirical evidence. Unfortunately, standard statistical approaches assume the veracity of the model rather than revealing the degree of model-dependence, and so this problem can be hard to detect. We develop easy-to-apply methods to evaluate counterfactuals that do not require sensitivity testing over specified classes of models. If an analysis fails the tests we offer, then we know that substantive results are sensitive to at least some modeling choices that are not based on empirical evidence. We use these methods to evaluate the extensive scholarly literatures on the effects of changes in the degree of democracy in a country (on any dependent variable) and separate analyses of the effects of UN peacebuilding efforts. We find evidence that many scholars are inadvertently drawing conclusions based more on modeling hypotheses than on their data.
For some research questions, history contains insufficient information to be our guide.}, author = {Gary King and Langche Zeng} } @article {KinZen06, title = {The Dangers of Extreme Counterfactuals}, journal = {Political Analysis}, volume = {14}, year = {2006}, pages = {131{\textendash}159}, abstract = {We address the problem that occurs when inferences about counterfactuals {\textendash} predictions, "what if" questions, and causal effects {\textendash} are attempted far from the available data. The danger of these extreme counterfactuals is that substantive conclusions drawn from statistical models that fit the data well turn out to be based largely on speculation hidden in convenient modeling assumptions that few would be willing to defend. Yet existing statistical strategies provide few reliable means of identifying extreme counterfactuals. We offer a proof that inferences farther from the data are more model-dependent, and then develop easy-to-apply methods to evaluate how model-dependent our answers would be to specified counterfactuals. These methods require neither sensitivity testing over specified classes of models nor evaluating any specific modeling assumptions. If an analysis fails the simple tests we offer, then we know that substantive results are sensitive to at least some modeling choices that are not based on empirical evidence.}, author = {Gary King and Langche Zeng} } @article {GakKin06, title = {Death by Survey: Estimating Adult Mortality without Selection Bias from Sibling Survival Data}, journal = {Demography}, volume = {43}, year = {2006}, month = {August}, pages = {569{\textendash}585}, abstract = {The widely used methods for estimating adult mortality rates from sample survey responses about the survival of siblings, parents, spouses, and others depend crucially on an assumption that we demonstrate does not hold in real data. We show that when this assumption is violated {\textendash} so that the mortality rate varies with sibship size {\textendash} mortality estimates can be massively biased. By using insights from work on the statistical analysis of selection bias, survey weighting, and extrapolation problems, we propose a new and relatively simple method of recovering the mortality rate with both greatly reduced potential for bias and increased clarity about the source of necessary assumptions.}, author = {Emmanuela Gakidou and Gary King} } @article {King06, title = {Publication, Publication}, journal = {PS: Political Science and Politics}, volume = {39}, year = {2006}, month = {January}, pages = {119{\textendash}125}, abstract = {I show herein how to write a publishable paper by beginning with the replication of a published article. This strategy seems to work well for class projects in producing papers that ultimately get published, helping to professionalize students into the discipline, and teaching them the scientific norms of the free exchange of academic information. I begin by briefly revisiting the prominent debate on replication our discipline had a decade ago and some of the progress made in data sharing since.}, url = {http://gking.harvard.edu/papers}, author = {Gary King} } @article {EpsHoKin05, title = {The Supreme Court During Crisis: How War Affects only Non-War Cases}, journal = {New York University Law Review}, volume = {80}, year = {2005}, month = {April}, pages = {1{\textendash}116}, abstract = {Does the U.S. Supreme Court curtail rights and liberties when the nation{\textquoteright}s security is under threat? 
In hundreds of articles and books, and with renewed fervor since September 11, 2001, members of the legal community have warred over this question. Yet, not a single large-scale, quantitative study exists on the subject. Using the best data available on the causes and outcomes of every civil rights and liberties case decided by the Supreme Court over the past six decades and employing methods chosen and tuned especially for this problem, our analyses demonstrate that when crises threaten the nation{\textquoteright}s security, the justices are substantially more likely to curtail rights and liberties than when peace prevails. Yet paradoxically, and in contradiction to virtually every theory of crisis jurisprudence, war appears to affect only cases that are unrelated to the war. For these cases, the effect of war and other international crises is so substantial, persistent, and consistent that it may surprise even those commentators who long have argued that the Court rallies around the flag in times of crisis. On the other hand, we find no evidence that cases most directly related to the war are affected. We attempt to explain this seemingly paradoxical evidence with one unifying conjecture: Instead of balancing rights and security in high stakes cases directly related to the war, the Justices retreat to ensuring the institutional checks of the democratic branches. Since rights-oriented and process-oriented dimensions seem to operate in different domains and at different times, and often suggest different outcomes, the predictive factors that work for cases unrelated to the war fail for cases related to the war. If this conjecture is correct, federal judges should consider giving less weight to legal principles outside of wartime but established during wartime, and attorneys should see it as their responsibility to distinguish cases along these lines.}, author = {Lee Epstein and Daniel E. Ho and Gary King and Jeffrey A. Segal} } @article {StoKinZen05, title = {WhatIf: Software for Evaluating Counterfactuals}, journal = {Journal of Statistical Software}, volume = {15}, number = {4}, year = {2005}, pages = {1{\textendash}18}, abstract = { This article describes\ WhatIf: Software for Evaluating Counterfactuals, an R package that implements the methods for evaluating counterfactuals introduced in King and Zeng (2006a) and King and Zeng (2006b). It offers easy-to-use techniques for assessing a counterfactual{\textquoteright}s model dependence without having to conduct sensitivity testing over specified classes of models. These same methods can be used to approximate the common support of the treatment and control groups in causal inference. }, url = {https://www.jstatsoft.org/article/view/v015i04/0}, author = {Heather Stoll and Gary King and Langche Zeng} } @article {ImaKin04, title = {Did Illegal Overseas Absentee Ballots Decide the 2000 U.S. Presidential Election?}, journal = {Perspectives on Politics}, volume = {2}, year = {2004}, month = {September}, pages = {537{\textendash}549}, abstract = {Although not widely known until much later, Al Gore received 202 more votes than George W. Bush on election day in Florida. George W. Bush is president because he overcame his election day deficit with overseas absentee ballots that arrived and were counted after election day. In the final official tally, Bush received 537 more votes than Gore.
These numbers are taken from the official results released by the Florida Secretary of State{\textquoteright}s office and so do not reflect overvotes, undervotes, unsuccessful litigation, butterfly ballot problems, recounts that might have been allowed but were not, or any other hypothetical divergence between voter preferences and counted votes. After the election, the New York Times conducted a six-month-long investigation and found that 680 of the overseas absentee ballots were illegally counted, and no partisan, pundit, or academic has publicly disagreed with their assessment. In this paper, we describe the statistical procedures we developed and implemented for the Times to ascertain whether disqualifying these 680 ballots would have changed the outcome of the election. The methods involve adding formal Bayesian model averaging procedures to King{\textquoteright}s (1997) ecological inference model. Formal Bayesian model averaging has not been used in political science but is especially useful when substantive conclusions depend heavily on apparently minor but indefensible model choices, when model generalization is not feasible, and when potential critics are more partisan than academic. We show how we derived the results for the Times so that other scholars can use these methods to make ecological inferences for other purposes. We also present a variety of new empirical results that delineate the precise conditions under which Al Gore would have been elected president, and offer new evidence of the striking effectiveness of the Republican effort to convince local election officials to count invalid ballots in Bush counties and not count them in Gore counties.}, author = {Kosuke Imai and Gary King} } @article {King04, title = {EI: A Program for Ecological Inference}, journal = {Journal of Statistical Software}, volume = {11}, year = {2004}, url = {http://www.jstatsoft.org/index.php?vol=11}, author = {Gary King} } @article {KinMurSal04, title = {Enhancing the Validity and Cross-cultural Comparability of Measurement in Survey Research}, journal = {American Political Science Review}, volume = {98}, year = {2004}, month = {February}, pages = {191{\textendash}207}, abstract = {We address two long-standing survey research problems: measuring complicated concepts, such as political freedom or efficacy, that researchers define best with reference to examples; and what to do when respondents interpret identical questions in different ways. Scholars have long addressed these problems with approaches to reduce incomparability, such as writing more concrete questions {\textendash} with uneven success. Our alternative is to measure directly response category incomparability and to correct for it. We measure incomparability via respondents{\textquoteright} assessments, on the same scale as the self-assessments to be corrected, of hypothetical individuals described in short vignettes. Since actual levels of the vignettes are invariant over respondents, variability in vignette answers reveals incomparability. Our corrections require either simple recodes or a statistical model designed to save survey administration costs. With analysis, simulations, and cross-national surveys, we show how response incomparability can drastically mislead survey researchers and how our approach can fix these problems.}, author = {Gary King and Christopher J.L. Murray and Joshua A. 
Salomon and Ajay Tandon} } @article {King04b, title = {Finding New Information for Ecological Inference Models: A Comment on Jon Wakefield, {\textquoteright}Ecological Inference in 2x2 Tables{\textquoteright}}, journal = {Journal of the Royal Statistical Society, Series A}, volume = {167}, year = {2004}, pages = {437}, author = {Gary King} } @article {BecKinZen04, title = {Theory and Evidence in International Conflict: A Response to de Marchi, Gelpi, and Grynaviski}, journal = {American Political Science Review}, volume = {98}, year = {2004}, month = {May}, pages = {379-389}, abstract = {We thank Scott de Marchi, Christopher Gelpi, and Jeffrey Grynaviski (2003 and hereinafter dGG) for their careful attention to our work (Beck, King, and Zeng, 2000 and hereinafter BKZ) and for raising some important methodological issues that we agree deserve readers{\textquoteright} attention. We are pleased that dGG{\textquoteright}s analyses are consistent with the theoretical conjecture about international conflict put forward in BKZ {\textendash} "The causes of conflict, theorized to be important but often found to be small or ephemeral, are indeed tiny for the vast majority of dyads, but they are large, stable, and replicable whenever the ex ante probability of conflict is large" (BKZ, p.21) {\textendash} and that dGG agree with our main methodological point that out-of-sample forecasting performance should always be one of the standards used to judge studies of international conflict, and indeed most other areas of political science. However, dGG frequently err when they draw methodological conclusions. Their central claim involves the superiority of logit over neural network models for international conflict data, as judged by forecasting performance and other properties such as ease of use and interpretation ("neural networks hold few unambiguous advantages... and carry significant costs" relative to logit; dGG, p.14). We show here that this claim, which would be regarded as stunning in any of the diverse fields in which both methods are more commonly used, is false. We also show that dGG{\textquoteright}s methodological errors and the restrictive model they favor cause them to miss and mischaracterize crucial patterns in the causes of international conflict. We begin in the next section by summarizing the growing support for our conjecture about international conflict. The second section discusses the theoretical reasons why neural networks dominate logistic regression, correcting a number of methodological errors. The third section then demonstrates empirically, in the same data as used in BKZ and dGG, that neural networks substantially outperform dGG{\textquoteright}s logit model. We show that neural networks improve on the forecasts from logit as much as logit improves on a model with no theoretical variables. We also show how dGG{\textquoteright}s logit analysis assumed, rather than estimated, the answer to the central question about the literature{\textquoteright}s most important finding, the effect of democracy on war. Since this and other substantive assumptions underlying their logit model are wrong, their substantive conclusion about the democratic peace is also wrong. 
The neural network models we used in BKZ not only avoid these difficulties, but they, or one of the other methods available that do not make highly restrictive assumptions about the exact functional form, are just what is called for to study the observable implications of our conjecture.}, author = {Nathaniel Beck and Gary King and Langche Zeng} } @article {GilKin04, title = {What to do When Your Hessian is Not Invertible: Alternatives to Model Respecification in Nonlinear Estimation}, journal = {Sociological Methods and Research}, volume = {32}, year = {2004}, month = {August}, pages = {54-87}, abstract = {What should a researcher do when statistical analysis software terminates before completion with a message that the Hessian is not invertible? The standard textbook advice is to respecify the model, but this is another way of saying that the researcher should change the question being asked. Obviously, however, computer programs should not be in the business of deciding what questions are worthy of study. Although noninvertible Hessians are sometimes signals of poorly posed questions, nonsensical models, or inappropriate estimators, they also frequently occur when information about the quantities of interest exists in the data, through the likelihood function. We explain the problem in some detail and lay out two preliminary proposals for ways of dealing with noninvertible Hessians without changing the question asked.}, author = {Jeff Gill and Gary King} } @article {AdoKin03, title = {Analyzing Second Stage Ecological Regressions}, journal = {Political Analysis}, volume = {11}, year = {2003}, month = {Winter}, pages = {65-76}, author = {Christopher Adolph and Gary King} } @article {KinLow03, title = {An Automated Information Extraction Tool For International Conflict Data with Performance as Good as Human Coders: A Rare Events Evaluation Design}, journal = {International Organization}, volume = {57}, year = {2003}, month = {July}, pages = {617-642}, abstract = {Despite widespread recognition that aggregated summary statistics on international conflict and cooperation miss most of the complex interactions among nations, the vast majority of scholars continue to employ annual, quarterly, or occasionally monthly observations. Daily events data, coded from some of the huge volume of news stories produced by journalists, have not been used much for the last two decades. We offer some reason to change this practice, which we feel should lead to considerably increased use of these data. We address advances in event categorization schemes and software programs that automatically produce data by "reading" news stories without human coders. We design a method that makes it feasible for the first time to evaluate these programs when they are applied in areas with the particular characteristics of international conflict and cooperation data, namely event categories with highly unequal prevalences, and where rare events (such as highly conflictual actions) are of special interest. We use this rare events design to evaluate one existing program, and find it to be as good as trained human coders, but obviously far less expensive to use. For large scale data collections, the program dominates human coding. Our new evaluative method should be of use in international relations, as well as more generally in the field of computational linguistics, for evaluating other automated information extraction tools. 
We believe that the data created by programs similar to the one we evaluated should see dramatically increased use in international relations research. To facilitate this process, we are releasing with this article data on 4.3 million international events, covering the entire world for the last decade.}, author = {Gary King and Will Lowe} } @article {EpsKin03, title = {Building An Infrastructure for Empirical Research in the Law}, journal = {Journal of Legal Education}, volume = {53}, year = {2003}, pages = {311{\textendash}320}, abstract = {In every discipline in which "empirical research" has become commonplace, scholars have formed a subfield devoted to solving the methodological problems unique to that discipline{\textquoteright}s data and theoretical questions. Although students of economics, political science, psychology, sociology, business, education, medicine, public health, and so on primarily focus on specific substantive questions, they cannot wait for those in other fields to solve their methodological problems or to teach them "new" methods, wherever they were initially developed. In "The Rules of Inference," we argued for the creation of an analogous methodological subfield devoted to legal scholarship. We also had two other objectives: (1) to adapt the rules of inference used in the natural and social sciences, which apply equally to quantitative and qualitative research, to the special needs, theories, and data in legal scholarship, and (2) to offer recommendations on how the infrastructure of teaching and research at law schools might be reorganized so that it could better support the creation of first-rate quantitative and qualitative empirical research without compromising other important objectives. Published commentaries on our paper, along with citations to it, have focused largely on the first {\textendash} our application of the rules of inference to legal scholarship. Until now, discussions of our second goal {\textendash} suggestions for the improvement of legal scholarship, as well as our argument for the creation of a group that would focus on methodological problems unique to law {\textendash} have been relegated to less public forums, even though, judging from the volume of correspondence we have received, they seem to be no less extensive.}, author = {Lee Epstein and Gary King} } @article {AdoKinHer03, title = {A Consensus on Second Stage Analyses in Ecological Inference Models}, journal = {Political Analysis}, volume = {11}, year = {2003}, month = {Winter}, pages = {86{\textendash}94}, abstract = {Since Herron and Shotts (2003a and hereinafter HS), Adolph and King (2003 and hereinafter AK), and Herron and Shotts (2003b and hereinafter HS2), the four of us have iterated many more times, learned a great deal, and arrived at a consensus on this issue. This paper describes our joint recommendations for how to run second-stage ecological regressions, and provides detailed analyses to back up our claims.}, author = {Christopher Adolph and Gary King and Kenneth W. Shotts and Michael C. Herron} } @article {King03, title = {The Future of Replication}, journal = {International Studies Perspectives}, volume = {4}, year = {2003}, month = {February}, pages = {443{\textendash}499}, abstract = {Since the replication standard was proposed for political science research, more journals have required or encouraged authors to make data available, and more authors have shared their data. 
The calls for continuing this trend are more persistent than ever, and the agreement among journal editors in this Symposium reinforces it. In this article, I offer a vision of a possible future of the replication movement. The plan is to implement this vision via the Virtual Data Center project, which {\textendash} by automating the process of finding, sharing, archiving, subsetting, converting, analyzing, and distributing data {\textendash} may greatly facilitate adherence to the replication standard.}, author = {Gary King} } @article {TomKinZen03, title = {ReLogit: Rare Events Logistic Regression}, journal = {Journal of Statistical Software}, volume = {8}, year = {2003}, note = {Abstract published in Journal of Computational and Graphical Statistics, Vol. 12, No. 1 (March, 2003): 246-247. }, url = {http://gking.harvard.edu/relogit}, author = {Michael Tomz and Gary King and Langche Zeng} } @article {LowKin03, title = {Some Statistical Methods for Evaluating Information Extraction Systems}, journal = {Proceedings of the 10th Conference of the European Chapter of the Association for Computational Linguistics}, year = {2003}, pages = {19-26}, abstract = {We present new statistical methods for evaluating information extraction systems. The methods were developed to evaluate a system used by political scientists to extract event information from news leads about international politics. The nature of these data presents two problems for evaluators: 1) the frequency distribution of event types in international event data is strongly skewed, so a random sample of newsleads will typically fail to contain any low-frequency events; 2) manual information extraction necessary to create evaluation sets is costly, and most effort is wasted coding high-frequency categories. We present an evaluation scheme that overcomes these problems with considerably less manual effort than traditional methods, and also allows us to interpret an information extraction system as an estimator (in the statistical sense) and to estimate its bias.}, author = {Will Lowe and Gary King} } @article {MurKinLop02, title = {Armed Conflict as a Public Health Problem}, journal = {BMJ (British Medical Journal)}, volume = {324}, year = {2002}, month = {February 9}, pages = {346{\textendash}349}, abstract = {Armed conflict is a major cause of injury and death worldwide, but we need much better methods of quantification before we can accurately assess its effect. Armed conflicts between warring states and groups within states have been major causes of ill health and mortality for most of human history. Conflict obviously causes deaths and injuries on the battlefield, but it also has health consequences arising from the displacement of populations, the breakdown of health and social services, and the heightened risk of disease transmission. Despite the size of the health consequences, military conflict has not received the same attention from public health research and policy as many other causes of illness and death. In contrast, political scientists have long studied the causes of war but have primarily been interested in the decision of elite groups to go to war, not in human death and misery. We review the limited knowledge on the health consequences of conflict, suggest ways to improve measurement, and discuss the potential for risk assessment and for preventing and ameliorating the consequences of conflict.}, author = {Christopher J.L. Murray and Gary King and Alan D. 
Lopez and Niels Tomijima and Etienne Krug} } @article {EpsKin02b, title = {Empirical Research and The Goals of Legal Scholarship: A Response}, journal = {University of Chicago Law Review}, volume = {69}, year = {2002}, month = {Winter}, pages = {1{\textendash}209}, abstract = {Although the term "empirical research" has become commonplace in legal scholarship over the past two decades, law professors have, in fact, been conducting research that is empirical {\textendash} that is, learning about the world using quantitative data or qualitative information {\textendash} for almost as long as they have been conducting research. For just as long, however, they have been proceeding with little awareness of, much less compliance with, the rules of inference, and without paying heed to the key lessons of the revolution in empirical analysis that has been taking place over the last century in other disciplines. The tradition of including some articles devoted exclusively to the methodology of empirical analysis {\textendash} so well represented in journals in traditional academic fields {\textendash} is virtually nonexistent in the nation{\textquoteright}s law reviews. As a result, readers learn considerably less accurate information about the empirical world than the studies{\textquoteright} stridently stated, but overconfident, conclusions suggest. To remedy this situation both for the producers and consumers of empirical work, this Article adapts the rules of inference used in the natural and social sciences to the special needs, theories, and data in legal scholarship, and explicates them with extensive illustrations from existing research. The Article also offers suggestions for how the infrastructure of teaching and research at law schools might be reorganized so that it can better support the creation of first-rate empirical research without compromising other important objectives.}, author = {Lee Epstein and Gary King} } @article {KinZen02b, title = {Estimating Risk and Rate Levels, Ratios, and Differences in Case-Control Studies}, journal = {Statistics in Medicine}, volume = {21}, year = {2002}, pages = {1409{\textendash}1427}, abstract = {Classic (or "cumulative") case-control sampling designs do not admit inferences about quantities of interest other than risk ratios, and then only by making the rare events assumption. Probabilities, risk differences, and other quantities cannot be computed without knowledge of the population incidence fraction. Similarly, density (or "risk set") case-control sampling designs do not allow inferences about quantities other than the rate ratio. Rates, rate differences, cumulative rates, risks, and other quantities cannot be estimated unless auxiliary information about the underlying cohort, such as the number of controls in each full risk set, is available. Most scholars who have considered the issue recommend reporting more than just the relative risks and rates, but auxiliary population information needed to do this is not usually available. 
We address this problem by developing methods that allow valid inferences about all relevant quantities of interest from either type of case-control study when completely ignorant of or only partially knowledgeable about relevant auxiliary population information.}, author = {Gary King and Langche Zeng} } @article {HonKinKat02, title = {A Fast, Easy, and Efficient Estimator for Multiparty Electoral Data}, journal = {Political Analysis}, volume = {10}, year = {2002}, month = {Winter}, pages = {84{\textendash}100}, abstract = {Katz and King (1999) develop a model for predicting or explaining aggregate electoral results in multiparty democracies. This model is, in principle, analogous to what least squares regression provides American politics researchers in that two-party system. Katz and King applied this model to three-party elections in England and revealed a variety of new features of incumbency advantage and where each party pulls support from. Although the mathematics of their statistical model covers any number of political parties, it is computationally very demanding, and hence slow and numerically imprecise, with more than three. The original goal of our work was to produce an approximate method that works quicker in practice with many parties without making too many theoretical compromises. As it turns out, the method we offer here improves on Katz and King{\textquoteright}s (in bias, variance, numerical stability, and computational speed) even when the latter is computationally feasible. We also offer easy-to-use software that implements our suggestions.}, author = {James Honaker and Gary King and Jonathan N. Katz} } @article {King02b, title = {Isolating Spatial Autocorrelation, Aggregation Bias, and Distributional Violations in Ecological Inference}, journal = {Political Analysis}, volume = {10}, year = {2002}, month = {Summer}, pages = {298{\textendash}300}, abstract = {This is an invited response to an article by Anselin and Cho. I make two main points: The numerical results in this article violate no conclusions from prior literature, and the absence of the deterministic information from the bounds in the article{\textquoteright}s analyses invalidates its theoretical discussion of spatial autocorrelation and all of its actual simulation results. An appendix shows how to draw simulations correctly.}, author = {Gary King} } @article {GakKin02, title = {Measuring Total Health Inequality: Adding Individual Variation to Group-Level Differences}, journal = {BioMed Central: International Journal for Equity in Health}, volume = {1}, year = {2002}, month = {August}, abstract = {Background: Studies have revealed large variations in average health status across social, economic, and other groups. No study exists on the distribution of the risk of ill-health across individuals, either within groups or across all people in a society, and as such a crucial piece of total health inequality has been overlooked. Some of the reason for this neglect has been that the risk of death, which forms the basis for most measures, is impossible to observe directly and difficult to estimate. Methods: We develop a measure of total health inequality {\textendash} encompassing all inequalities among people in a society, including variation between and within groups {\textendash} by adapting a beta-binomial regression model. We apply it to children under age two in 50 low- and middle-income countries. 
Our method has been adopted by the World Health Organization and is being implemented in surveys around the world and preliminary estimates have appeared in the World Health Report (2000). Results: Countries with similar average child mortality differ considerably in total health inequality. Liberia and Mozambique have the largest inequalities in child survival, while Colombia, the Philippines and Kazakhstan have the lowest levels among the countries measured. Conclusions: Total health inequality estimates should be routinely reported alongside average levels of health in populations and groups, as they reveal important policy-related information not otherwise knowable. This approach enables meaningful comparisons of inequality across countries and future analyses of the determinants of inequality.}, author = {Emmanuela Gakidou and Gary King} } @article {KinMur02, title = {Rethinking Human Security}, journal = {Political Science Quarterly}, volume = {116}, year = {2002}, month = {Winter}, pages = {585{\textendash}610}, abstract = {In the last two decades, the international community has begun to conclude that attempts to ensure the territorial security of nation-states through military power have failed to improve the human condition. Despite astronomical levels of military spending, deaths due to military conflict have not declined. Moreover, even when the borders of some states are secure from foreign threats, the people within those states do not necessarily have freedom from crime, enough food, proper health care, education, or political freedom. In response to these developments, the international community has gradually moved to combine economic development with military security and other basic human rights to form a new concept of "human security". Unfortunately, by common assent the concept lacks both a clear definition, consistent with the aims of the international community, and any agreed upon measure of it. In this paper, we propose a simple, rigorous, and measurable definition of human security: the expected number of years of future life spent outside the state of "generalized poverty". Generalized poverty occurs when an individual falls below the threshold in any key domain of human well-being. We consider improvements in data collection and methods of forecasting that are necessary to measure human security and then introduce an agenda for research and action to enhance human security that follows logically in the areas of risk assessment, prevention, protection, and compensation.}, author = {Gary King and Christopher J.L. Murray} } @article {EpsKin02, title = {The Rules of Inference}, journal = {University of Chicago Law Review}, volume = {69}, year = {2002}, note = {with comments from six scholars and a rejoinder by us.}, month = {Winter}, pages = {1{\textendash}209}, abstract = {Although the term "empirical research" has become commonplace in legal scholarship over the past two decades, law professors have, in fact, been conducting research that is empirical {\textendash} that is, learning about the world using quantitative data or qualitative information {\textendash} for almost as long as they have been conducting research. For just as long, however, they have been proceeding with little awareness of, much less compliance with, the rules of inference, and without paying heed to the key lessons of the revolution in empirical analysis that has been taking place over the last century in other disciplines. 
The tradition of including some articles devoted exclusively to the methodology of empirical analysis {\textendash} so well represented in journals in traditional academic fields {\textendash} is virtually nonexistent in the nation{\textquoteright}s law reviews. As a result, readers learn considerably less accurate information about the empirical world than the studies{\textquoteright} stridently stated, but overconfident, conclusions suggest. To remedy this situation both for the producers and consumers of empirical work, this Article adapts the rules of inference used in the natural and social sciences to the special needs, theories, and data in legal scholarship, and explicates them with extensive illustrations from existing research. The Article also offers suggestions for how the infrastructure of teaching and research at law schools might be reorganized so that it can better support the creation of first-rate empirical research without compromising other important objectives.}, author = {Lee Epstein and Gary King} } @article {AltKinSig01, title = {Aggregation Among Binary, Count, and Duration Models: Estimating the Same Quantities from Different Levels of Data}, journal = {Political Analysis}, volume = {9}, year = {2001}, month = {Winter}, pages = {21{\textendash}44}, abstract = {Binary, count, and duration data all code discrete events occurring at points in time. Although a single data generation process can produce all of these three data types, the statistical literature is not very helpful in providing methods to estimate parameters of the same process from each. In fact, only a single theoretical process exists for which known statistical methods can estimate the same parameters {\textendash} and it is generally used only for count and duration data. The result is that seemingly trivial decisions about which level of data to use can have important consequences for substantive interpretations. We describe the theoretical event process for which results exist, based on time independence. We also derive a set of models for a time-dependent process and compare their predictions to those of a commonly used model. Any hope of understanding and avoiding the more serious problems of aggregation bias in events data is contingent on first deriving a much wider arsenal of statistical models and theoretical processes that are not constrained by the particular forms of data that happen to be available. We discuss these issues and suggest an agenda for political methodologists interested in this very large class of aggregation problems.}, author = {James E. Alt and Gary King and Curtis Signorino} } @article {KinHonJos01, title = {Analyzing Incomplete Political Science Data: An Alternative Algorithm for Multiple Imputation}, journal = {American Political Science Review}, volume = {95}, year = {2001}, month = {March}, pages = {49{\textendash}69}, abstract = {We propose a remedy for the discrepancy between the way political scientists analyze data with missing values and the recommendations of the statistics community. Methodologists and statisticians agree that "multiple imputation" is a better approach to the problem of missing data scattered through one{\textquoteright}s explanatory and dependent variables than the methods currently used in applied data analysis. 
The discrepancy occurs because the computational algorithms used to apply the best multiple imputation models have been slow, difficult to implement, impossible to run with existing commercial statistical packages, and have demanded considerable expertise. We adapt an algorithm and use it to implement a general-purpose, multiple imputation model for missing data. This algorithm is considerably easier to use than the leading method recommended in the statistics literature. We also quantify the risks of current missing data practices, illustrate how to use the new procedure, and evaluate this alternative through simulated data as well as actual empirical examples. Finally, we offer easy-to-use software that implements our suggested methods. (Software: AMELIA)}, author = {Gary King and James Honaker and Anne Joseph and Kenneth Scheve} } @article {RosJiaKin01, title = {Bayesian and Frequentist Inference for Ecological Inference: The RxC Case}, journal = {Statistica Neerlandica}, volume = {55}, year = {2001}, pages = {134{\textendash}156}, abstract = {In this paper we propose Bayesian and frequentist approaches to ecological inference, based on R x C contingency tables, including a covariate. The proposed Bayesian model extends the binomial-beta hierarchical model developed by King, Rosen and Tanner (1999) from the 2 x 2 case to the R x C case; the inferential procedure employs Markov chain Monte Carlo (MCMC) methods. As such the resulting MCMC analysis is rich but computationally intensive. The frequentist approach, based on first moments rather than on the entire likelihood, provides quick inference via nonlinear least-squares, while retaining good frequentist properties. The two approaches are illustrated with simulated data, as well as with real data on voting patterns in Weimar Germany. In the final section of the paper we provide an overview of a range of alternative inferential approaches which trade off computational intensity for statistical efficiency.}, author = {Ori Rosen and Wenxin Jiang and Gary King and Martin A. Tanner} } @article {AltAndDig01a, title = {A Digital Library for the Dissemination and Replication of Quantitative Social Science Research}, journal = {Social Science Computer Review}, volume = {19}, year = {2001}, month = {Winter}, pages = {458{\textendash}470}, abstract = {The Virtual Data Center (VDC) software is an open-source, digital library system for quantitative data. We discuss what the software does, and how it provides an infrastructure for the management and dissemination of distributed collections of quantitative data, and the replication of results derived from these data.}, author = {Micah Altman and Leonid Andreev and Mark Diggory and Gary King and Daniel L. Kiskis and Elizabeth Kolster and Michael Krot and Sidney Verba} } @article {KinZen01b, title = {Explaining Rare Events in International Relations}, journal = {International Organization}, volume = {55}, year = {2001}, month = {Summer}, pages = {693{\textendash}715}, abstract = {Some of the most important phenomena in international conflict are coded as "rare events data," binary dependent variables with dozens to thousands of times fewer events, such as wars, coups, etc., than "nonevents". Unfortunately, rare events data are difficult to explain and predict, a problem that seems to have at least two sources. First, and most importantly, the data collection strategies used in international conflict are grossly inefficient. 
The fear of collecting data with too few events has led to data collections with huge numbers of observations but relatively few, and poorly measured, explanatory variables. As it turns out, more efficient sampling designs exist for making valid inferences, such as sampling all available events (e.g., wars) and a tiny fraction of non-events (peace). This enables scholars to save as much as 99\% of their (non-fixed) data collection costs, or to collect much more meaningful explanatory variables. Second, logistic regression, and other commonly used statistical procedures, can underestimate the probability of rare events. We introduce some corrections that outperform existing methods and change the estimates of absolute and relative risks by as much as some estimated effects reported in the literature. We also provide easy-to-use methods and software that link these two results, enabling both types of corrections to work simultaneously.}, author = {Gary King and Langche Zeng} } @article {KinZen02, title = {Improving Forecasts of State Failure}, journal = {World Politics}, volume = {53}, year = {2001}, month = {July}, pages = {623{\textendash}658}, abstract = {We offer the first independent scholarly evaluation of the claims, forecasts, and causal inferences of the State Failure Task Force and their efforts to forecast when states will fail. State failure refers to the collapse of the authority of the central government to impose order, as in civil wars, revolutionary wars, genocides, politicides, and adverse or disruptive regime transitions. This task force, set up at the behest of Vice President Gore in 1994, has been led by a group of distinguished academics working as consultants to the U.S. Central Intelligence Agency. State Failure Task Force reports and publications have received attention in the media, in academia, and from public policy decision-makers. In this article, we identify several methodological errors in the task force work that cause their reported forecast probabilities of conflict to be too large, their causal inferences to be biased in unpredictable directions, and their claims of forecasting performance to be exaggerated. However, we also find that the task force has amassed the best and most carefully collected data on state failure in existence, and the required corrections which we provide, although very large in effect, are easy to implement. We also reanalyze their data with better statistical procedures and demonstrate how to improve forecasting performance to levels significantly greater than even corrected versions of their models. Although still a highly uncertain endeavor, we are as a consequence able to offer the first accurate forecasts of state failure, along with procedures and results that may be of practical use in informing foreign policy decision making. We also describe a number of strong empirical regularities that may help in ascertaining the causes of state failure. }, author = {Gary King and Langche Zeng} } @article {AltAndDigKinKisKolKroVer01, title = {An Introduction to the Virtual Data Center Project and Software}, journal = {Proceedings of The First ACM+IEEE Joint Conference on Digital Libraries}, year = {2001}, pages = {203{\textendash}204}, publisher = {ACM Press}, author = {Micah Altman and Leonid Andreev and Mark Diggory and Gary King and Elizabeth Kolster and Krot, M. and Sidney Verba and Daniel L. 
Kiskis} } @article {KinZen01, title = {Logistic Regression in Rare Events Data}, journal = {Political Analysis}, volume = {9}, year = {2001}, month = {Spring}, pages = {137{\textendash}163}, abstract = {We study rare events data, binary dependent variables with dozens to thousands of times fewer ones (events, such as wars, vetoes, cases of political activism, or epidemiological infections) than zeros ("nonevents"). In many literatures, these variables have proven difficult to explain and predict, a problem that seems to have at least two sources. First, popular statistical procedures, such as logistic regression, can sharply underestimate the probability of rare events. We recommend corrections that outperform existing methods and change the estimates of absolute and relative risks by as much as some estimated effects reported in the literature. Second, commonly used data collection strategies are grossly inefficient for rare events data. The fear of collecting data with too few events has led to data collections with huge numbers of observations but relatively few, and poorly measured, explanatory variables, such as in international conflict data with more than a quarter-million dyads, only a few of which are at war. As it turns out, more efficient sampling designs exist for making valid inferences, such as sampling all available events (e.g., wars) and a tiny fraction of nonevents (peace). This enables scholars to save as much as 99\% of their (nonfixed) data collection costs or to collect much more meaningful explanatory variables. We provide methods that link these two results, enabling both types of corrections to work simultaneously, and software that implements the methods developed.}, author = {Gary King and Langche Zeng} } @article {AltAndDig01b, title = {An Overview of the Virtual Data Center Project and Software}, journal = {JCDL {\textquoteright}01: First Joint Conference on Digital Libraries}, year = {2001}, pages = {203-204}, abstract = { This software is now superseded by Dataverse. In this paper, we present an overview of the Virtual Data Center (VDC) software, an open-source digital library system for the management and dissemination of distributed collections of quantitative data (see http://TheData.org). The VDC functionality provides everything necessary to maintain and disseminate an individual collection of research studies, including facilities for the storage, archiving, cataloging, translation, and on-line analysis of a particular collection. Moreover, the system provides extensive support for distributed and federated collections including: location-independent naming of objects, distributed authentication and access control, federated metadata harvesting, remote repository caching, and distributed "virtual" collections of remote objects. }, author = {Micah Altman and Leonid Andreev and Mark Diggory and Gary King and Daniel L. Kiskis and Elizabeth Kolster and Michael Krot and Sidney Verba} } @article {King01, title = {Proper Nouns and Methodological Propriety: Pooling Dyads in International Relations Data}, journal = {International Organization}, volume = {55}, year = {2001}, month = {Fall}, pages = {497{\textendash}507}, abstract = {The intellectual stakes at issue in this symposium are very high: Green, Kim, and Yoon (2000 and hereinafter GKY) apply their proposed methodological prescriptions and conclude that the key finding in the field is wrong and that democracy "has no effect on militarized disputes." 
GKY are mainly interested in convincing scholars about their methodological points and see themselves as having no stake in the resulting substantive conclusions. However, their methodological points are also high stakes claims: if correct, the vast majority of statistical analyses of military conflict ever conducted would be invalidated. GKY say they "make no attempt to break new ground statistically," but, as we will see, this both understates their methodological contribution to the field and misses some unique features of their application and data in international relations. On the latter, GKY{\textquoteright}s critics are united: Oneal and Russett (2000) conclude that GKY{\textquoteright}s method "produces distorted results," and show even in GKY{\textquoteright}s framework how democracy{\textquoteright}s effect can be reinstated. Beck and Katz (2000) are even more unambiguous: "GKY{\textquoteright}s conclusion, in table 3, that variables such as democracy have no pacific impact, is simply nonsense...GKY{\textquoteright}s (methodological) proposal...is NEVER a good idea." My given task is to sort out and clarify these conflicting claims and counterclaims. The procedure I followed was to engage in extensive discussions with the participants that included joint reanalyses provoked by our discussions and passing computer program code (mostly with Monte Carlo simulations) back and forth to ensure we were all talking about the same methods and agreed with the factual results. I learned a great deal from this process and believe that the positions of the participants are now a lot closer than it may seem from their written statements. Indeed, I believe that all the participants now agree with what I have written here, even though they would each have different emphases (and although my believing there is agreement is not the same as there actually being agreement!).}, author = {Gary King} } @article {King00, title = {Geography, Statistics, and Ecological Inference}, journal = {Annals of the Association of American Geographers}, volume = {90}, year = {2000}, month = {September}, pages = {601{\textendash}606}, abstract = {I am grateful for such thoughtful review from these three distinguished geographers. Fotheringham provides an excellent summary of the approach offered, including how it combines the two methods that have dominated applications (and methodological analysis) for nearly half a century{\textendash} the method of bounds (Duncan and Davis, 1953) and Goodman{\textquoteright}s (1953) least squares regression. Since Goodman{\textquoteright}s regression is the only method of ecological inference "widely used in Geography" (O{\textquoteright}Loughlin), adding information that is known to be true from the method of bounds (for each observation) would seem to have the chance to improve a lot of research in this field. The other addition that EI provides is estimates at the lowest level of geography available, making it possible to map results, instead of giving only single summary numbers for the entire geographic region. Whether one considers the combined method offered "the" solution (as some reviewers and commentators have portrayed it), "a" solution (as I tried to describe it), or, perhaps better and more simply, as an improved method of ecological inference, is not important. The point is that more data are better, and this method incorporates more. I am gratified that all three reviewers seem to support these basic points.
In this response, I clarify a few points, correct some misunderstandings, and present additional evidence. I conclude with some possible directions for future research.}, author = {Gary King} } @article {BecKinZen00, title = {Improving Quantitative Studies of International Conflict: A Conjecture}, journal = {American Political Science Review}, volume = {94}, year = {2000}, month = {March}, pages = {21{\textendash}36}, abstract = {We address a well-known but infrequently discussed problem in the quantitative study of international conflict: Despite immense data collections, prestigious journals, and sophisticated analyses, empirical findings in the literature on international conflict are often unsatisfying. Many statistical results change from article to article and specification to specification. Accurate forecasts are nonexistent. In this article we offer a conjecture about one source of this problem: The causes of conflict, theorized to be important but often found to be small or ephemeral, are indeed tiny for the vast majority of dyads, but they are large, stable, and replicable wherever the ex ante probability of conflict is large. This simple idea has an unexpectedly rich array of observable implications, all consistent with the literature. We directly test our conjecture by formulating a statistical model that includes critical features. Our approach, a version of a "neural network" model, uncovers some interesting structural features of international conflict, and as one evaluative measure, forecasts substantially better than any previous effort. Moreover, this improvement comes at little cost, and it is easy to evaluate whether the model is a statistical improvement over the simpler models commonly used.}, author = {Nathaniel Beck and Gary King and Langche Zeng} } @article {KinTomWit00, title = {Making the Most of Statistical Analyses: Improving Interpretation and Presentation}, journal = {American Journal of Political Science}, volume = {44}, year = {2000}, month = {April}, pages = {341{\textendash}355}, abstract = {Social scientists rarely take full advantage of the information available in their statistical results. As a consequence, they miss opportunities to present quantities that are of greatest substantive interest for their research and express the appropriate degree of certainty about these quantities. In this article, we offer an approach, built on the technique of statistical simulation, to extract the currently overlooked information from any statistical method and to interpret and present it in a reader-friendly manner. Using this technique requires some expertise, which we try to provide herein, but its application should make the results of quantitative articles more informative and transparent. To illustrate our recommendations, we replicate the results of several published works, showing in each case how the authors{\textquoteright} own conclusions can be expressed more sharply and informatively, and, without changing any data or statistical assumptions, how our approach reveals important new information about the research questions at hand.
We also offer very easy-to-use Clarify software that implements our suggestions.}, url = {http://gking.harvard.edu/files/abs/making-abs.shtml}, author = {Gary King and Michael Tomz and Jason Wittenberg} } @article {KinRosTan99, title = {Binomial-Beta Hierarchical Models for Ecological Inference}, journal = {Sociological Methods and Research}, volume = {28}, year = {1999}, month = {August}, pages = {61{\textendash}90}, abstract = {The authors develop binomial-beta hierarchical models for ecological inference using insights from the literature on hierarchical models based on Markov chain Monte Carlo algorithms and King{\textquoteright}s ecological inference model. The new approach reveals some features of the data that King{\textquoteright}s approach does not, can easily be generalized to more complicated problems such as general R x C tables, allows the data analyst to adjust for covariates, and provides a formal evaluation of the significance of the covariates. It may also be better suited to cases in which the observed aggregate cells are estimated from very few observations or have some forms of measurement error. This article also provides an example of a hierarchical model in which the statistical idea of "borrowing strength" is used not merely to increase the efficiency of the estimates but to enable the data analyst to obtain estimates.}, author = {Gary King and Ori Rosen and Martin A. Tanner} } @article {King99, title = {The Future of Ecological Inference Research: A Reply to Freedman et al.}, journal = {Journal of the American Statistical Association}, volume = {94}, year = {1999}, month = {March}, pages = {352-355}, abstract = {I appreciate the editor{\textquoteright}s invitation to reply to Freedman et al.{\textquoteright}s (1998) review of "A Solution to the Ecological Inference Problem: Reconstructing Individual Behavior from Aggregate Data" (Princeton University Press.) I welcome this scholarly critique and JASA{\textquoteright}s decision to publish in this field. Ecological inference is a large and very important area for applications that is especially rich with open statistical questions. I hope this discussion stimulates much new scholarship. Freedman et al. raise several interesting issues, but also misrepresent or misunderstand the prior literature, my approach, and their own empirical analyses, and compound the problem, by refusing requests from me and the editor to make their data and software available for this note. Some clarification is thus in order.}, author = {Gary King} } @article {KinLav99, title = {Many Publications, but Still No Evidence}, journal = {Electoral Studies}, volume = {18}, year = {1999}, month = {December}, pages = {597{\textendash}598}, abstract = {In 1990, Budge and Hofferbert (B\&H) claimed that they had found solid evidence that party platforms cause U.S. budgetary priorities, and thus concluded that mandate theory applies in the United States as strongly as it does elsewhere. The implications of this stunning conclusion would mean that virtually every observer of the American party system in this century has been wrong. King and Laver (1993) reanalyzed B\&H{\textquoteright}s data and demonstrated in two ways that there exists no evidence for a causal relationship. First, accepting their entire statistical model, and correcting only an algebraic error (a mistake in how they computed their standard errors), we showed that their hypothesized relationship holds up in fewer than half the tests they reported. 
Second, we showed that their statistical model includes a slightly hidden but politically implausible assumption that a new party achieves every budgetary desire immediately upon taking office. We then specified a model without this unrealistic assumption and we found that the assumption was not supported, and that all evidence in the data for platforms causing government budgets evaporated. In their published response to our article, B\&H withdrew their key claim and said they were now (in 1993) merely interested in an association and not causation. That is how it was left in 1993{\textemdash}a perfectly amicable resolution as far as we were concerned{\textemdash}since we have no objection to the claim that there is a non-causal or chance association between any two variables. Of course, we see little reason to be interested in non-causal associations in this area any more than in the chance correlation that exists between the winner of the baseball World Series and the party winning the U.S. presidency. Since party mandate theory only makes sense as a causal theory, the conventional wisdom about America{\textquoteright}s porous, non-mandate party system stands.}, author = {Gary King and Michael Laver} } @article {LewKin99, title = {No Evidence on Directional vs. Proximity Voting}, journal = {Political Analysis}, volume = {8}, year = {1999}, month = {August}, pages = {21{\textendash}33}, abstract = {The directional and proximity models offer dramatically different theories for how voters make decisions and fundamentally divergent views of the supposed microfoundations on which vast bodies of literature in theoretical rational choice and empirical political behavior have been built. We demonstrate here that the empirical tests in the large and growing body of literature on this subject amount to theoretical debates about which statistical assumption is right. The key statistical assumptions have not been empirically tested and, indeed, turn out to be effectively untestable with existing methods and data. Unfortunately, these assumptions are also crucial since changing them leads to different conclusions about voter processes.}, author = {Jeffrey Lewis and Gary King} } @article {GelKinLiu99, title = {Not Asked and Not Answered: Multiple Imputation for Multiple Surveys}, journal = {Journal of the American Statistical Association}, volume = {93}, year = {1999}, month = {September}, pages = {846{\textendash}857}, abstract = {We present a method of analyzing a series of independent cross-sectional surveys in which some questions are not answered in some surveys and some respondents do not answer some of the questions posed. The method is also applicable to a single survey in which different questions are asked or different sampling methods are used in different strata or clusters. Our method involves multiply imputing the missing items and questions by adding to existing methods of imputation designed for single surveys a hierarchical regression model that allows covariates at the individual and survey levels. Information from survey weights is exploited by including in the analysis the variables on which the weights are based, and then reweighting individual responses (observed and imputed) to estimate population quantities. We also develop diagnostics for checking the fit of the imputation model based on comparing imputed data to nonimputed data.
We illustrate with the example that motivated this project: a study of pre-election public opinion polls in which not all the questions of interest are asked in all the surveys, so that it is infeasible to impute within each survey separately.}, author = {Andrew Gelman and Gary King and Chuanhai Liu} } @article {KatKin99, title = {A Statistical Model for Multiparty Electoral Data}, journal = {American Political Science Review}, volume = {93}, year = {1999}, month = {March}, pages = {15{\textendash}32}, abstract = {We propose a comprehensive statistical model for analyzing multiparty, district-level elections. This model, which provides a tool for comparative politics research analogous to that which regression analysis provides in the American two-party context, can be used to explain or predict how geographic distributions of electoral results depend upon economic conditions, neighborhood ethnic compositions, campaign spending, and other features of the election campaign or aggregate areas. We also provide new graphical representations for data exploration, model evaluation, and substantive interpretation. We illustrate the use of this model by attempting to resolve a controversy over the size of and trend in electoral advantage of incumbency in Britain. Contrary to previous analyses, all based on measures now known to be biased, we demonstrate that the advantage is small but meaningful, varies substantially across the parties, and is not growing. Finally, we show how to estimate the party from which each party{\textquoteright}s advantage is predominantly drawn.}, author = {Jonathan Katz and Gary King} } @article {GelKinBos98, title = {Estimating the Probability of Events that Have Never Occurred: When Is Your Vote Decisive?}, journal = {Journal of the American Statistical Association}, volume = {93}, year = {1998}, month = {March}, pages = {1{\textendash}9}, abstract = {Researchers sometimes argue that statisticians have little to contribute when few realizations of the process being estimated are observed. We show that this argument is incorrect even in the extreme situation of estimating the probabilities of events so rare that they have never occurred. We show how statistical forecasting models allow us to use empirical data to improve inferences about the probabilities of these events. Our application is estimating the probability that your vote will be decisive in a U.S. presidential election, a problem that has been studied by political scientists for more than two decades. The exact value of this probability is of only minor interest, but the number has important implications for understanding the optimal allocation of campaign resources, whether states and voter groups receive their fair share of attention from prospective presidents, and how formal "rational choice" models of voter behavior might be able to explain why people vote at all. We show how the probability of a decisive vote can be estimated empirically from state-level forecasts of the presidential election and illustrate with the example of 1992. Based on generalizations of standard political science forecasting models, we estimate the (prospective) probability of a single vote being decisive as about 1 in 10 million for close national elections such as 1992, varying by about a factor of 10 among states. Our results support the argument that subjective probabilities of many types are best obtained through empirically based statistical prediction models rather than solely through mathematical reasoning.
We discuss the implications of our findings for the types of decision analyses used in public choice studies.}, author = {Andrew Gelman and Gary King and John Boscardin} } @article {KinPal98, title = {The Record of American Democracy, 1984-1990}, journal = {Sociological Methods and Research}, volume = {26}, year = {1998}, note = { Also PS: Political Science and Politics, Vol. XXX, No. 4 (December, 1997): 746-747; and ICPSR Bulletin, Vol. XVIII, No. 4 (May, 1998): 1-3.}, month = {February}, pages = {424{\textendash}427}, url = {http://www.hmdc.harvard.edu/ROAD}, author = {Gary King and Bradley Palmquist} } @article {KinSig96, title = {The Generalization in the Generalized Event Count Model, With Comments on Achen, Amato, and Londregan}, journal = {Political Analysis}, volume = {6}, year = {1996}, pages = {225{\textendash}252}, abstract = {We use an analogy with the normal distribution and linear regression to demonstrate the need for the Generalized Event Count (GEC) model. We then show how the GEC provides a unified framework within which to understand a diversity of distributions used to model event counts, and how to express the model in one simple equation. Finally, we address the points made by Christopher Achen, Timothy Amato, and John Londregan. Amato{\textquoteright}s and Londregan{\textquoteright}s arguments are consistent with ours and provide additional interesting information and explanations. Unfortunately, the foundation on which Achen built his paper turns out to be incorrect, rendering all his novel claims about the GEC false (or in some cases irrelevant).}, author = {Gary King and Curtis S. Signorino} } @article {BenKin96, title = {A Preview of EI and EzI: Programs for Ecological Inference}, journal = {Social Science Computer Review}, volume = {14}, year = {1996}, month = {Winter}, pages = {433{\textendash}438}, abstract = {Ecological inference, as traditionally defined, is the process of using aggregate (i.e., "ecological") data to infer discrete individual-level relationships of interest when individual-level data are not available. Existing methods of ecological inference generate very inaccurate conclusions about the empirical world{\textendash}which thus gives rise to the ecological inference problem. Most scholars who analyze aggregate data routinely encounter some form of this problem. EI (by Gary King) and EzI (by Kenneth Benoit and Gary King) are freely available software that implement the statistical and graphical methods detailed in Gary King{\textquoteright}s book A Solution to the Ecological Inference Problem. These methods make it possible to infer the attributes of individual behavior from aggregate data. EI works within the statistics program Gauss and will run on any computer hardware and operating system that runs Gauss (the Gauss module, CML, or constrained maximum likelihood{\textendash}by Ronald J. Schoenberg{\textendash}is also required). EzI is a menu-oriented stand-alone version of the program that runs under MS-DOS (and soon Windows 95, OS/2, and HP-UNIX). EI allows users to make ecological inferences as part of the powerful and open Gauss statistical environment. In contrast, EzI requires no additional software, and provides an attractive menu-based user interface for non-Gauss users, although it lacks the flexibility afforded by the Gauss version.
Both programs presume that the user has read or is familiar with A Solution to the Ecological Inference Problem.}, author = {Kenneth Benoit and Gary King} } @article {King96, title = {Why Context Should Not Count}, journal = {Political Geography}, volume = {15}, year = {1996}, pages = {159{\textendash}164}, abstract = {This paper is an invited comment on a paper by John Agnew. I largely agree with Agnew{\textquoteright}s comments and thus focus on remaining areas where an alternative perspective might be useful. My argument is that political geographers should not be so concerned with demonstrating that context matters. My reasoning is based on three arguments. First, in fact context rarely counts (Section 1) and, second, the most productive practical goal for political researchers should be to show that it does not count (Section 2). Finally, a disproportionate focus on {\textquoteleft}context counting{\textquoteright} can lead, and has led, to some serious problems in practical research situations, such as attempting to give theoretical answers to empirical questions (Section 3) and empirical answers to theoretical questions (Section 4).}, author = {Gary King} } @article {WinSigKin95, title = {A Correction for an Underdispersed Event Count Probability Distribution}, journal = {Political Analysis}, year = {1995}, pages = {215{\textendash}228}, abstract = {We demonstrate that the expected value and variance commonly given for a well-known probability distribution are incorrect. We also provide corrected versions and report changes in a computer program to account for the known practical uses of this distribution.}, author = {Rainer Winkelmann and Curtis Signorino and Gary King} } @article {KinKeoVer95, title = {The Importance of Research Design in Political Science}, journal = {American Political Science Review}, volume = {89}, year = {1995}, note = {, a response to five authors in the symposium "The Qualitative-Quantitative Disputation: Gary King, Robert O. Keohane, and Sidney Verba{\textquoteright}s Designing Social Inquiry: Scientific Inference in Qualitative Research"}, month = {June}, pages = {454{\textendash}481}, abstract = {Receiving five serious reviews in this symposium is gratifying and confirms our belief that research design should be a priority for our discipline. We are pleased that our five distinguished reviewers appear to agree with our unified approach to the logic of inference in the social sciences, and with our fundamental point: that good quantitative and good qualitative research designs are based fundamentally on the same logic of inference. The reviewers also raised virtually no objections to the main practical contribution of our book{\textendash} our many specific procedures for avoiding bias, getting the most out of qualitative data, and making reliable inferences. However, the reviews make clear that although our book may be the latest word on research design in political science, it is surely not the last. We are taxed for failing to include important issues in our analysis and for dealing inadequately with some of what we included. Before responding to the reviewers{\textquoteright} more direct criticisms, let us explain what we emphasize in Designing Social Inquiry and how it relates to some of the points raised by the reviewers.}, author = {Gary King and Robert O.
Keohane and Sidney Verba} } @article {VosGelKin95, title = {Pre-Election Survey Methodology: Details From Nine Polling Organizations, 1988 and 1992}, journal = {Public Opinion Quarterly}, volume = {59}, year = {1995}, month = {Spring}, pages = {98{\textendash}132}, abstract = {Before every presidential election, journalists, pollsters, and politicians commission dozens of public opinion polls. Although the primary function of these surveys is to forecast the election winners, they also generate a wealth of political data valuable even after the election. These preelection polls are useful because they are conducted with such frequency that they allow researchers to study change in estimates of voter opinion within very narrow time increments (Gelman and King 1993). Additionally, so many are conducted that the cumulative sample size of these polls is large enough to construct aggregate measures of public opinion within small demographic or geographical groupings (Wright, Erikson, and McIver 1985). These advantages, however, are mitigated by the decentralized origin of the many preelection polls. The surveys are conducted by diverse private enterprises with procedures that differ significantly. Moreover, important methodological detail does not appear in the public record. Codebooks provided by the survey organizations are all incomplete; many are outdated and most are at least partly inaccurate. The most recent treatment in the academic literature, by Brady and Orren (1992), discusses the approach used by three companies but conceals their identities and omits most of the detail. ...}, author = {D. Steven Voss and Andrew Gelman and Gary King} } @article {King95, title = {Replication, Replication}, journal = {PS: Political Science and Politics}, volume = {28}, year = {1995}, note = {See updates to this paper\ for how I use this paper as a class assignment now.}, month = {September}, pages = {444-452}, abstract = {Political science is a community enterprise and the community of empirical political scientists needs access to the body of data necessary to replicate existing studies to understand, evaluate, and especially build on this work. Unfortunately, the norms we have in place now do not encourage, or in some cases even permit, this aim. Following are suggestions that would facilitate replication and are easy to implement {\textendash} by teachers, students, dissertation writers, graduate programs, authors, reviewers, funding agencies, and journal and book editors.}, author = {Gary King} } @article {King95b, title = {A Revised Proposal, Proposal}, journal = {PS: Political Science and Politics}, volume = {XXVIII}, year = {1995}, month = {September}, pages = {494{\textendash}499}, author = {Gary King} } @article {GelKin94b, title = {Enhancing Democracy Through Legislative Redistricting}, journal = {American Political Science Review}, volume = {88}, year = {1994}, note = {, Parts reprinted in California Policy Studies Brief, a publication of the California Policy Seminar, Vol. 7, No. 5 (April, 1995).}, month = {September}, pages = {541{\textendash}559}, abstract = {We demonstrate the surprising benefits of legislative redistricting (including partisan gerrymandering) for American representative democracy. In so doing, our analysis resolves two long-standing controversies in American politics.
First, whereas some scholars believe that redistricting reduces electoral responsiveness by protecting incumbents, others, that the relationship is spurious, we demonstrate that both sides are wrong: redistricting increases responsiveness. Second, while some researchers believe that gerrymandering dramatically increases partisan bias and others deny this effect, we show both sides are in a sense correct. Gerrymandering biases electoral systems in favor of the party that controls the redistricting as compared to what would have happened if the other party controlled it, but any type of redistricting reduces partisan bias as compared to an electoral system without redistricting. Incorrect conclusions in both literatures resulted from misjudging the enormous uncertainties present during redistricting periods, making simplified assumptions about the redistricters{\textquoteright} goals, and using inferior statistical methods.}, author = {Andrew Gelman and Gary King} } @article {AltKin94, title = {Transfers of Governmental Power: The Meaning of Time Dependence}, journal = {Comparative Political Studies}, volume = {27}, year = {1994}, month = {July}, pages = {190{\textendash}210}, abstract = {King, Alt, Burns, and Laver (1990) proposed and estimated a unified model in which cabinet durations depended on seven explanatory variables reflecting features of the cabinets and the bargaining environments in which they formed, along with a stochastic component in which the risk of a cabinet falling was treated as a constant across its tenure. Two recent research reports take issue with one aspect of this model. Warwick and Easton replicate the earlier findings for explanatory variables but claim that the stochastic risk should be seen as rising, and at a rate which varies, across the life of the cabinet. Bienen and van de Walle, using data on the duration of leaders, allege that random risk is falling. We continue in our goal of unifying this literature by providing further estimates with both cabinet and leader duration data that confirm the original explanatory variables{\textquoteright} effects, showing that leaders{\textquoteright} durations are affected by many of the same factors that affect the durability of the cabinets they lead, demonstrating that cabinets have stochastic risk of ending that is indeed constant across the theoretically most interesting range of durations, and suggesting that stochastic risk for leaders in countries with cabinet government is, if not constant, more likely to rise than fall.}, author = {James E. Alt and Gary King} } @article {GelKin94, title = {A Unified Method of Evaluating Electoral Systems and Redistricting Plans}, journal = {American Journal of Political Science}, volume = {38}, year = {1994}, month = {May}, pages = {514{\textendash}554}, abstract = {We derive a unified statistical method with which one can produce substantially improved definitions and estimates of almost any feature of two-party electoral systems that can be defined based on district vote shares. 
Our single method enables one to calculate more efficient estimates, with more trustworthy assessments of their uncertainty, than each of the separate multifarious existing measures of partisan bias, electoral responsiveness, seats-votes curves, expected or predicted vote in each district in a legislature, the probability that a given party will win the seat in each district, the proportion of incumbents or others who will lose their seats, the proportion of women or minority candidates to be elected, the incumbency advantage and other causal effects, the likely effects on the electoral system and district votes of proposed electoral reforms, such as term limitations, campaign spending limits, and drawing majority-minority districts, and numerous others. To illustrate, we estimate the partisan bias and electoral responsiveness of the U.S. House of Representatives since 1900 and evaluate the fairness of competing redistricting plans for the 1992 Ohio state legislature.}, author = {Andrew Gelman and Gary King} } @article {KinWal93, title = {Good Research and Bad Research: Extending Zimile{\textquoteright}s Criticism}, journal = {Early Childhood Research Quarterly}, volume = {8}, year = {1993}, month = {September}, pages = {397{\textendash}401}, abstract = {Herbert Zimiles has written a provocative article on quantitative research. Because his specific critiques of research on infant day care are nominal examples of his much broader arguments, we focus only on his general methodological perspectives in this brief comment. We write as methodologists, a qualitative researcher with a quantitative background (Walsh) and a quantitative researcher completing a book on qualitative research (King and see King, Keohane \& Verba, in preparation).}, author = {Gary King and Daniel J. Walsh} } @article {KinLav93, title = {On Party Platforms, Mandates, and Government Spending}, journal = {American Political Science Review}, volume = {87}, year = {1993}, month = {September}, pages = {744{\textendash}750}, abstract = {In their 1990 Review article, Ian Budge and Richard Hofferbert analyzed the relationship between party platform emphases, control of the White House, and national government spending priorities, reporting strong evidence of a "party mandate" connection between them. Gary King and Michael Laver successfully replicate the original analysis, critique the interpretation of the causal effects, and present a reanalysis showing that platforms have small or nonexistent effects on spending. In response, Budge, Hofferbert, and Michael McDonald agree that their language was somewhat inconsistent on both interactions and causality but defend their conceptualization of "mandates" as involving only an association, not necessarily a causal connection, between party commitments and government policy. Hence, while the causes of government policy are of interest, noncausal associations are sufficient as evidence of party mandates in American politics.}, author = {Gary King and Michael Laver} } @article {KingBruGil93, title = {The Science of Political Science Graduate Admissions}, journal = {PS: Political Science and Politics}, volume = {XXVI}, year = {1993}, month = {December}, pages = {772{\textendash}778}, abstract = {As political scientists, we spend much time teaching and doing scholarly research, and more time than we may wish to remember on university committees. 
However, just as many of us believe that teaching and research are not fundamentally different activities, we also need not use fundamentally different standards of inference when studying government, policy, and politics than when participating in the governance of departments and universities. In this article, we describe our attempts to bring somewhat more systematic methods to the process and policies of graduate admissions.}, author = {Gary King and John M. Bruce and Michael Gilligan} } @article {GelKin93, title = {Why are American Presidential Election Campaign Polls so Variable when Votes are so Predictable?}, journal = {British Journal of Political Science}, volume = {23}, year = {1993}, month = {October}, pages = {409{\textendash}451}, abstract = {As most political scientists know, the outcome of the U.S. Presidential election can be predicted within a few percentage points (in the popular vote), based on information available months before the election. Thus, the general election campaign for president seems irrelevant to the outcome (except in very close elections), despite all the media coverage of campaign strategy. However, it is also well known that the pre-election opinion polls can vary wildly over the campaign, and this variation is generally attributed to events in the campaign. How can campaign events affect people{\textquoteright}s opinions on whom they plan to vote for, and yet not affect the outcome of the election? For that matter, why do voters consistently increase their support for a candidate during his nominating convention, even though the conventions are almost entirely predictable events whose effects can be rationally forecast? In this exploratory study, we consider several intuitively appealing, but ultimately wrong, resolutions to this puzzle, and discuss our current understanding of what causes opinion polls to fluctuate and yet reach a predictable outcome. Our evidence is based on graphical presentation and analysis of over 67,000 individual-level responses from forty-nine commercial polls during the 1988 campaign and many other aggregate poll results from the 1952{\textendash}1992 campaigns. We show that responses to pollsters during the campaign are not generally informed or even, in a sense we describe, "rational." In contrast, voters decide which candidate to eventually support based on their enlightened preferences, as formed by the information they have learned during the campaign, as well as basic political cues such as ideology and party identification. We cannot prove this conclusion, but we do show that it is consistent with the aggregate forecasts and individual-level opinion poll responses. Based on the enlightened preferences hypothesis, we conclude that the news media have an important effect on the outcome of Presidential elections{\textendash}-not due to misleading advertisements, sound bites, or spin doctors, but rather by conveying candidates{\textquoteright} positions on important issues.}, author = {Andrew Gelman and Gary King} } @article {King91e, title = {Calculating Standard Errors of Predicted Values based on Nonlinear Functional Forms}, journal = {The Political Methodologist}, volume = {4}, year = {1991}, month = {Fall}, abstract = {Whenever we report predicted values, we should also report some measure of the uncertainty of these estimates. In the linear case, this is relatively simple, and the answer well-known, but with nonlinear models the answer may not be apparent. This short article shows how to make these calculations. 
I first present this for the familiar linear case, also reviewing the two forms of uncertainty in these estimates, and then show how to calculate these for any arbitrary function. An example appears last.\ }, author = {Gary King} } @article {King91b, title = {Constituency Service and Incumbency Advantage}, journal = {British Journal of Political Science}, volume = {21}, year = {1991}, month = {January}, pages = {119{\textendash}128}, abstract = {This Note addresses the long-standing discrepancy between scholarly support for the effect of constituency service on incumbency advantage and a large body of contradictory empirical evidence. I show first that many of the methodological problems noticed in past research reduce to a single methodological problem that is readily resolved. The core of this Note then provides among the first systematic empirical evidence for the constituency service hypothesis. Specifically, an extra $10,000 added to the budget of the average state legislator gives this incumbent an additional 1.54 percentage points in the next election (with a 95\% confidence interval of 1.14 to 1.94 percentage points).}, author = {Gary King} } @article {King91c, title = {On Political Methodology}, journal = {Political Analysis}, volume = {2}, year = {1991}, pages = {1{\textendash}30}, abstract = {"Politimetrics" (Gurr 1972), "polimetrics" (Alker 1975), "politometrics" (Hilton 1976), "political arithmetic" (Petty [1672] 1971), "quantitative Political Science (QPS)," "governmetrics," "posopolitics" (Papayanopoulos 1973), "political science statistics (Rai and Blydenburgh 1973), "political statistics" (Rice 1926). These are some of the names that scholars have used to describe the field we now call "political methodology." The history of political methodology has been quite fragmented until recently, as reflected by this patchwork of names. The field has begun to coalesce during the past decade and we are developing persistent organizations, a growing body of scholarly literature, and an emerging consensus about important problems that need to be solved. I make one main point in this article: If political methodology is to play an important role in the future of political science, scholars will need to find ways of representing more interesting political contexts in quantitative analyses. This does not mean that scholars should just build more and more complicated statistical models. Instead, we need to represent more of the essence of political phenomena in our models. The advantage of formal and quantitative approaches is that they are abstract representations of the political world and are, thus, much clearer. We need methods that enable us to abstract the right parts of the phenomenon we are studying and exclude everything superfluous. Despite the fragmented history of quantitative political analysis, a version of this goal has been voiced frequently by both quantitative researchers and their critics (Sec. 2). However, while recognizing this shortcoming, earlier scholars were not in the position to rectify it, lacking the mathematical and statistical tools and, early on, the data. Since political methodologists have made great progress in these and other areas in recent years, I argue that we are now capable of realizing this goal. In section 3, I suggest specific approaches to this problem. 
Finally, in section 4, I provide two modern examples, ecological inference and models of spatial autocorrelation, to illustrate these points.}, author = {Gary King} } @article {King91d, title = {Stochastic Variation: A Comment on Lewis-Beck and Skalaban{\textquoteright}s {\textquoteright}The R-Square{\textquoteright}}, journal = {Political Analysis}, volume = {2}, year = {1991}, pages = {185{\textendash}200}, abstract = {In an interesting and provocative article, Michael Lewis-Beck and Andrew Skalaban make an important contribution by emphasizing several philosophical issues in political methodology that have received too little attention from methodologists and quantitative researchers. These issues involve the role of systematic, and especially stochastic, variation in statistical models. After briefly discussing a few points of disagreement, hoping to reduce them to points of clarification, I turn to the philosophical issues. Examples with real data follow.}, author = {Gary King} } @article {KinGel91, title = {Systemic Consequences of Incumbency Advantage in the U.S. House}, journal = {American Journal of Political Science}, volume = {35}, year = {1991}, month = {February}, pages = {110{\textendash}138}, abstract = {The dramatic increase in the electoral advantage of incumbency has sparked widespread interest among congressional researchers over the last 15 years. Although many scholars have studied the advantages of incumbency for incumbents, few have analyzed its effects on the underlying electoral system. We examine the influence of the incumbency advantage on two features of the electoral system in the U.S. House elections: electoral responsiveness and partisan bias. Using a district-level seats-votes model of House elections, we are able to distinguish systematic changes from unique, election-specific variations. Our results confirm the significant drop in responsiveness, and even steeper decline outside the South, over the past 40 years. Contrary to expectations, we find that increased incumbency advantage explains less than a third of this trend, indicating that some other unknown factor is responsible. Moreover, our analysis also reveals another dramatic pattern, largely overlooked in the congressional literature: in the 1940{\textquoteright}s and 1950{\textquoteright}s the electoral system was severely biased in favor of the Republican party. The system shifted incrementally from this severe Republican bias over the next several decades to a moderate Democratic bias by the mid-1980{\textquoteright}s. Interestingly, changes in incumbency advantage explain virtually all of this trend in partisan bias since the 1940{\textquoteright}s. By removing incumbency advantage and the existing configuration of incumbents and challengers analytically, our analysis reveals an underlying electoral system that remains consistently biased in favor of the Republican party. 
Thus, our results indicate that incumbency advantage affects the underlying electoral system, but contrary to conventional wisdom, this changes the trend in partisan bias more than electoral responsiveness.}, author = {Gary King and Andrew Gelman} } @article {King91, title = {{\textquoteright}Truth{\textquoteright} is Stranger than Prediction, More Questionable Than Causal Inference}, journal = {American Journal of Political Science}, volume = {35}, year = {1991}, month = {November}, pages = {1047{\textendash}1053}, abstract = {Robert Luskin{\textquoteright}s article in this issue provides a useful service by appropriately qualifying several points I made in my 1986 American Journal of Political Science article. Whereas I focused on how to avoid common mistakes in quantitative political science, Luskin clarifies ways to extract some useful information from usually problematic statistics: correlation coefficients, standardized coefficients, and especially R2. Since these three statistics are very closely related (and indeed deterministic functions of one another in some cases), I focus in this discussion primarily on R2, the most widely used and abused. Luskin also widens the discussion to various kinds of specification tests, a general issue I also address. In fact, as Beck (1991) reports, a large number of formal specification tests are just functions of R2, with differences among them primarily due to how much each statistic penalizes one for including extra parameters and fewer observations. Quantitative political scientists often worry about model selection and specification, asking questions about parameter identification, autocorrelated or heteroscedastic disturbances, parameter constancy, variable choice, measurement error, endogeneity, functional forms, stochastic assumptions, and selection bias, among numerous others. These model specification questions are all important, but we may have forgotten why we pose them. Political scientists commonly give three reasons: (1) finding the "true" model, or the "full" explanation, (2) prediction, and (3) estimating specific causal effects. I argue here that (1) is used the most but useful the least, (2) is very useful but not usually in political science, where forecasting is not often a central concern, and (3) correctly represents the goals of political scientists and should form the basis of most of our quantitative empirical work.}, author = {Gary King} } @article {King90, title = {Electoral Responsiveness and Partisan Bias in Multiparty Democracies}, journal = {Legislative Studies Quarterly}, volume = {XV}, year = {1990}, month = {May}, pages = {159{\textendash}181}, abstract = {Because the goals of local and national representation are inherently incompatible, there is an uncertain relationship between aggregates of citizen votes and the national allocation of legislative seats in almost all democracies. In particular electoral systems, this uncertainty leads to diverse configurations of electoral responsiveness and partisan bias, two fundamental concepts in empirical democratic theory. This paper unifies virtually all existing multiyear seats-votes models as special cases of a new general model. It also permits the first formalization of, and reliable method for empirically estimating, electoral responsiveness and partisan bias in electoral systems with any number of political parties.
I apply this model to data from nine democratic countries, revealing clear patterns in responsiveness and bias across different types of electoral rules.}, author = {Gary King} } @article {GelKin90b, title = {Estimating Incumbency Advantage Without Bias}, journal = {American Journal of Political Science}, volume = {34}, year = {1990}, month = {November}, pages = {1142{\textendash}1164}, abstract = {In this paper we prove theoretically and demonstrate empirically that all existing measures of incumbency advantage in the congressional elections literature are biased or inconsistent. We then provide an unbiased estimator based on a very simple linear regression model. We apply this new method to congressional elections since 1900, providing the first evidence of a positive incumbency advantage in the first half of the century.}, author = {Andrew Gelman and Gary King} } @article {GelKin90, title = {Estimating the Electoral Consequences of Legislative Redistricting}, journal = {Journal of the American Statistical Association}, volume = {85}, year = {1990}, month = {June}, pages = {274{\textendash}282}, abstract = {We analyze the effects of redistricting as revealed in the votes received by the Democratic and Republican candidates for state legislature. We develop measures of partisan bias and the responsiveness of the composition of the legislature to changes in statewide votes. Our statistical model incorporates a mixed hierarchical Bayesian and non-Bayesian estimation, requiring simulation along the lines of Tanner and Wong (1987). This model provides reliable estimates of partisan bias and responsiveness along with measures of their variabilities from only a single year of electoral data. This allows one to distinguish systematic changes in the underlying electoral system from typical election-to-election variability.}, author = {Andrew Gelman and Gary King} } @article {AnsKin90, title = {Measuring the Consequences of Delegate Selection Rules in Presidential Nominations}, journal = {Journal of Politics}, volume = {52}, year = {1990}, month = {May}, pages = {609{\textendash}621}, abstract = {In this paper, we formalize existing normative criteria used to judge presidential selection contests by modeling the translation of citizen votes in primaries and caucuses into delegates to the national party conventions. We use a statistical model that enables us to separate the form of electoral responsiveness in presidential selection systems, as well as the degree of bias toward each of the candidates. We find that (1) the Republican nomination system is more responsive to changes in citizen votes than the Democratic system and (2) non-PR primaries are always more responsive than PR primaries and (3) surprisingly, caucuses are more proportional than even primaries held under PR rules and (4) significant bias in favor of a candidate was a good prediction of the winner of the nomination contest. We also (5) evaluate the claims of Ronald Reagan in 1976 and Jesse Jackson in 1988 that the selection systems were substantially biased against their candidates. 
We find no evidence to support Reagan{\textquoteright}s claim, but substantial evidence that Jackson was correct.}, author = {Stephen Ansolabehere and Gary King} } @article {KinAltBur90, title = {A Unified Model of Cabinet Dissolution in Parliamentary Democracies}, journal = {American Journal of Political Science}, volume = {34}, year = {1990}, month = {August}, pages = {846{\textendash}871}, abstract = {The literature on cabinet duration is split between two apparently irreconcilable positions. The attributes theorists seek to explain cabinet duration as a fixed function of measured explanatory variables, while the events process theorists model cabinet durations as a product of purely stochastic processes. In this paper we build a unified statistical model that combines the insights of these previously distinct approaches. We also generalize this unified model, and all previous models, by including (1) a stochastic component that takes into account the censoring that occurs as a result of governments lasting to the vicinity of the maximum constitutional interelection period, (2) a systematic component that precludes the possibility of negative duration predictions, and (3) a much more objective and parsimonious list of explanatory variables, the explanatory power of which would not be improved by including a list of indicator variables for individual countries.}, author = {Gary King and James Alt and Nancy Burns and Michael Laver} } @article {GelKin89, title = {Electoral Responsiveness in U.S. Congressional Elections, 1946-1986}, journal = {Proceedings of the Social Statistics Section, American Statistical Association}, year = {1989}, pages = {208}, author = {Andrew Gelman and Gary King} } @article {King89c, title = {Event Count Models for International Relations: Generalizations and Applications}, journal = {International Studies Quarterly}, volume = {33}, year = {1989}, month = {June}, pages = {123{\textendash}147}, abstract = {International relations theorists tend to think in terms of continuous processes. Yet we observe only discrete events, such as wars or alliances, and summarize them in terms of the frequency of occurrence. As such, most empirical analyses in international relations are based on event count variables. Unfortunately, analysts have generally relied on statistical techniques that were designed for continuous data. This mismatch between theory and method has caused bias, inefficiency, and numerous inconsistencies in both theoretical arguments and empirical findings throughout the literature. This article develops a much more powerful approach to modeling and statistical analysis based explicitly on estimating continuous processes from observed event counts. To demonstrate this class of models, I present several new statistical techniques developed for and applied to different areas of international relations. These include the influence of international alliances on the outbreak of war, the contagious process of multilateral economic sanctions, and reciprocity in superpower conflict.
I also show how one can extract considerably more information from existing data and relate substantive theory to empirical analyses more explicitly with this approach.}, author = {Gary King} } @article {King89b, title = {Representation Through Legislative Redistricting: A Stochastic Model}, journal = {American Journal of Political Science}, volume = {33}, year = {1989}, month = {November}, pages = {787{\textendash}824}, abstract = {This paper builds a stochastic model of the processes that give rise to observed patterns of representation and bias in congressional and state legislative elections. The analysis demonstrates that partisan swing and incumbency voting, concepts from the congressional elections literature, have determinate effects on representation and bias, concepts from the redistricting literature. The model shows precisely how incumbency and increased variability of partisan swing reduce the responsiveness of the electoral system and how partisan swing affects whether the system is biased toward one party or the other. Incumbency, and other causes of unresponsive representation, also reduce the effect of partisan swing on current levels of partisan bias. By relaxing the restrictive portions of the widely applied "uniform partisan swing" assumption, the theoretical analysis leads directly to an empirical model enabling one more reliably to estimate responsiveness and bias from a single year of electoral data. Applying this to data from seven elections in each of six states, the paper demonstrates that redistricting has effects in predicted directions in the short run: partisan gerrymandering biases the system in favor of the party in control and, by freeing up seats held by opposition party incumbents, increases the system{\textquoteright}s responsiveness. Bipartisan-controlled redistricting appears to reduce bias somewhat and dramatically to reduce responsiveness. Nonpartisan redistricting processes substantially increase responsiveness but do not have as clear an effect on bias. However, after only two elections, prima facie evidence for redistricting effects evaporates in most states. Finally, across every state and type of redistricting process, responsiveness declined significantly over the course of the decade. This is clear evidence that the phenomenon of "vanishing marginals," recognized first in the U.S. Congress literature, also applies to these different types of state legislative assemblies. It also strongly suggests that redistricting could not account for this pattern.}, author = {Gary King} } @article {King89e, title = {A Seemingly Unrelated Poisson Regression Model}, journal = {Sociological Methods and Research}, volume = {17}, year = {1989}, month = {February}, pages = {235{\textendash}255}, abstract = {This article introduces a new estimator for the analysis of two contemporaneously correlated endogenous event count variables. This seemingly unrelated Poisson regression model (SUPREME) estimator combines the efficiencies created by single equation Poisson regression model estimators and insights from "seemingly unrelated" linear regression models.}, author = {Gary King} } @article {King89d, title = {Variance Specification in Event Count Models: From Restrictive Assumptions to a Generalized Estimator}, journal = {American Journal of Political Science}, volume = {33}, year = {1989}, month = {August}, pages = {762{\textendash}784}, abstract = {This paper discusses the problem of variance specification in models for event count data.
Event counts are dependent variables that can take on only nonnegative integer values, such as the number of wars or coups d{\textquoteright}etat in a year. I discuss several generalizations of the Poisson regression model, presented in King (1988), to allow for substantively interesting stochastic processes that do not fit into the Poisson framework. Individual models that cope with, and help analyze, heterogeneity, contagion, and negative contagion are each shown to lead to specific statistical models for event count data. In addition, I derive a new generalized event count (GEC) model that enables researchers to extract significant amounts of new information from existing data by estimating features of these unobserved substantive processes. Applications of this model to congressional challenges of presidential vetoes and superpower conflict demonstrate the dramatic advantages of this approach.}, author = {Gary King} } @article {King88, title = {Statistical Models for Political Science Event Counts: Bias in Conventional Procedures and Evidence for The Exponential Poisson Regression Model}, journal = {American Journal of Political Science}, volume = {32}, year = {1988}, month = {August}, pages = {838-863}, abstract = {This paper presents analytical, Monte Carlo, and empirical evidence on models for event count data. Event counts are dependent variables that measure the number of times some event occurs. Counts of international events are probably the most common, but numerous examples exist in every empirical field of the discipline. The results of the analysis below strongly suggest that the way event counts have been analyzed in hundreds of important political science studies has produced statistically and substantively unreliable results. Misspecification, inefficiency, bias, inconsistency, insufficiency, and other problems result from the unknowing application of two common methods that are without theoretical justification or empirical unity in this type of data. I show that the exponential Poisson regression (EPR) model provides analytically, in large samples, and empirically, in small, finite samples, a far superior model and optimal estimator. I also demonstrate the advantage of this methodology in an application to nineteenth-century party switching in the U.S. Congress. Its use by political scientists is strongly encouraged.}, author = {Gary King} } @article {KinBro87, title = {Democratic Representation and Partisan Bias in Congressional Elections}, journal = {American Political Science Review}, volume = {81}, year = {1987}, month = {December}, pages = {1252{\textendash}1273}, abstract = {The translation of citizen votes into legislative seats is of central importance in democratic electoral systems. It has been a longstanding concern among scholars in political science and in numerous other disciplines. Through this literature, two fundamental tenets of democratic theory, partisan bias and democratic representation, have often been confused. We develop a general statistical model of the relationship between votes and seats and separate these two important concepts theoretically and empirically. In so doing, we also solve several methodological problems with the study of seats, votes and the cube law. An application to U.S. congressional districts provides estimates of bias and representation for each state and demonstrates the model{\textquoteright}s utility. Results of this application show distinct types of representation coexisting in U.S. states.
Although most states have small partisan biases, there are some with a substantial degree of bias.}, author = {Gary King and Robert X Browning} } @article {King87, title = {Presidential Appointments to the Supreme Court: Adding Systematic Explanation to Probabilistic Description}, journal = {American Politics Quarterly}, volume = {15}, year = {1987}, month = {July}, pages = {373{\textendash}386}, abstract = {Three articles, published in the leading journals of three disciplines over the last five decades, have each used the Poisson probability distribution to help describe the frequency with which presidents were able to appoint United States Supreme Court Justices. This work challenges these previous findings with a new model of Court appointments. The analysis demonstrates that the number of appointments a president can expect to make in a given year is a function of existing measurable variables.}, author = {Gary King} } @article {BroKin87, title = {Seats, Votes, and Gerrymandering: Measuring Bias and Representation in Legislative Redistricting}, journal = {Law and Policy}, volume = {9}, year = {1987}, month = {July}, pages = {305{\textendash}322}, abstract = {The Davis v. Bandemer case focused much attention on the problem of using statistical evidence to demonstrate the existence of political gerrymandering. In this paper, we evaluate the uses and limitations of measures of the seat-votes relationship in the Bandemer case. We outline a statistical method we have developed that can be used to estimate bias and the form of representation in legislative redistricting. We apply this method to Indiana State House and Senate elections for the period 1972 to 1984 and demonstrate a maximum bias of 6.2\% toward the Republicans in the House and a 2.8\% bias in the Senate.}, author = {Robert X Browning and Gary King} } @article {KinMer86, title = {The Development of Political Activists: A Model of Early Learning}, journal = {Social Science Quarterly}, volume = {67}, year = {1986}, month = {September}, pages = {473{\textendash}490}, abstract = {An analysis of panel data reveals the unique importance of early learning to the development of political activism among Americans. A combination of two learning models{\textendash} the frequently used crystallization model and the rarely analyzed sensitization model{\textendash} is advanced as most appropriate for understanding political socialization and the development of political activism. The findings contribute to research on elite behavior and on the process of political socialization.}, author = {Gary King and Richard Merelman} } @article {King86, title = {How Not to Lie With Statistics: Avoiding Common Mistakes in Quantitative Political Science}, journal = {American Journal of Political Science}, volume = {30}, year = {1986}, month = {August}, pages = {666{\textendash}687}, abstract = {This article identifies a set of serious theoretical mistakes appearing with troublingly high frequency throughout the quantitative political science literature. These mistakes are all based on faulty statistical theory or on erroneous statistical analysis. Through algebraic and interpretive proofs, some of the most commonly made mistakes are explicated and illustrated. The theoretical problem underlying each is highlighted, and suggested solutions are provided throughout.
It is argued that closer attention to these problems and solutions will result in more reliable quantitative analyses and more useful theoretical contributions.}, author = {Gary King} } @article {King86c, title = {Political Parties and Foreign Policy: A Structuralist Approach}, journal = {Political Psychology}, volume = {7}, year = {1986}, month = {March}, pages = {83{\textendash}101}, abstract = {This article introduces the theory and approach of structural anthropology and applies it to a problem in American political science. Through this approach, the "bipartisan foreign policy hypothesis" and the "two presidencies hypothesis" are reformulated and reconsidered. Until now, participants in the debate over each have only rarely built on, or even cited, the other{\textquoteright}s research. An additional problem is that the widespread conventional wisdom in support of the two hypotheses is inconsistent with systematic scholarly analyses. This paper demonstrates that the two hypotheses are drawn from the same underlying structure. Each hypothesis and the theoretical model it implies is conceptually and empirically extended to take into account the differences between congressional leaders and members. Then, historical examples and statistical analyses of House roll call data are used to demonstrate that the hypotheses, while sometimes supported for the congressional members, are far more applicable to leadership decision making. Conclusions suggest that conventional wisdom be revised to take these differences into account.}, author = {Gary King} } @article {King86b, title = {The Significance of Roll Calls in Voting Bodies: A Model and Statistical Estimation}, journal = {Social Science Research}, volume = {15}, year = {1986}, month = {June}, pages = {135{\textendash}152}, abstract = {In the long history of legislative roll call analyses, there continues to exist a particularly troubling problem: There is no satisfactory method for measuring the relative importance or significance of individual roll calls. A measure of roll call significance would be interesting in and of itself, but many have realized that it could also substantially improve empirical research. The consequence of this situation is that hundreds of researchers risk heteroskedastic disturbances (resulting in inefficient estimates and biased standard errors and test statistics), are unable to appropriately choose the roll calls most suited to their theory (resulting in analyses that may not correctly test their theory), and often use methods that create more problems than they solve (resulting in selection bias, unrealistic weighting schemes, or relatively subjective measures). This article introduces a new method designed to meet these problems. Based on an application of Box-Tiao intervention analysis, the method extracts from observed voting participation scores the "revealed preferences" of legislators as a measure of roll call significance. Applying this method to roll calls from the U.S. Senate demonstrates the success of the method and suggests its utility in applied research.}, author = {Gary King} } @article {39686, title = {Book Review of {\textquoteleft}Forecasting Presidential Elections{\textquoteright}}, journal = {American Political Science Review}, volume = {79}, number = {3}, year = {1985}, pages = {855}, abstract = {This is a book review of Steven J. Rosenstone, Forecasting Presidential Elections, New Haven: Yale University Press, 1983.}, author = {Gary King} }
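Several of the event count entries above (King 1988; King 1989) build on the exponential Poisson regression specification, in which an observed count y_i is modeled as Poisson with mean exp(x_i'beta). The sketch below is a minimal illustration of that baseline specification only, fit on synthetic data with the statsmodels GLM routine; the variable names and simulated values are assumptions for illustration, and it does not implement the generalized event count (GEC) or SUPREME estimators described in those abstracts.

# Minimal sketch: exponential Poisson regression for event counts,
# y_i ~ Poisson(exp(x_i' beta)). Synthetic data; baseline model only,
# not the GEC or SUPREME estimators discussed in the entries above.
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
n = 500
x = rng.normal(size=n)                   # one covariate (illustrative)
X = sm.add_constant(x)                   # design matrix with intercept
beta_true = np.array([0.2, 0.5])         # assumed "true" coefficients
y = rng.poisson(np.exp(X @ beta_true))   # simulated event counts

fit = sm.GLM(y, X, family=sm.families.Poisson()).fit()
print(fit.params)                        # estimates should be near beta_true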
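The redistricting entries above (King and Browning 1987; Browning and King 1987; King 1989) estimate partisan bias and responsiveness from the seats-votes relationship. As a point of reference, one standard formalization in this literature, consistent with though not necessarily identical to the estimators in those papers, and stated under the assumption that S is a party's expected seat share and V its average district vote share, writes the relationship on the log-odds scale:

\log \frac{S}{1 - S} = \lambda + \rho \, \log \frac{V}{1 - V},

where \lambda captures partisan bias (the seat advantage when V = 0.5) and \rho captures responsiveness (how sharply seats respond to votes); \lambda = 0 with \rho = 3 recovers the classical cube law, and \lambda = 0 with \rho = 1 corresponds to proportional representation.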