@inproceedings{16563,
  title    = {BugsJS: a Benchmark of JavaScript Bugs},
  year     = {2019},
  pages    = {90-101},
  address  = {Xi{\textquoteright}an, China},
  abstract = {JavaScript is a popular programming language that is also error-prone due to its asynchronous, dynamic, and loosely typed nature. In recent years, numerous techniques have been proposed for analyzing and testing JavaScript applications. However, our survey of the literature in this area revealed that the proposed techniques are often evaluated on different datasets of programs and bugs. The lack of a commonly used benchmark limits the ability to perform fair and unbiased comparisons for assessing the efficacy of new techniques. To fill this gap, we propose BugsJS, a benchmark of 453 real, manually validated JavaScript bugs from 10 popular JavaScript server-side programs, comprising 444k LOC in total. Each bug is accompanied by its bug report, the test cases that detect it, as well as the patch that fixes it. BugsJS features a rich interface for accessing the faulty and fixed versions of the programs and executing the corresponding test cases, which facilitates conducting highly reproducible empirical studies and comparisons of JavaScript analysis and testing tools.},
  keywords = {benchmark, Bug database, BugsJS, JavaScript, literature survey, real bugs, reproducibility, select:quality},
  doi      = {10.1109/ICST.2019.00019},
  url      = {https://ieeexplore.ieee.org/document/8730197/authors\#authors},
  author   = {P{\'e}ter Gyimesi and B{\'e}la Vancsics and Andrea Stocco and Davood Mazinanian and {\'A}rp{\'a}d Besz{\'e}des and Rudolf Ferenc and Ali Mesbah}
}