@inproceedings{16563,
  title = {BugsJS: a Benchmark of JavaScript Bugs},
  year = {2019},
  pages = {90-101},
  address = {Xi{\textquoteright}an, China},
  abstract = {JavaScript is a popular programming language that is also error-prone due to its asynchronous, dynamic, and loosely typed nature. In recent years, numerous techniques have been proposed for analyzing and testing JavaScript applications. However, our survey of the literature in this area revealed that the proposed techniques are often evaluated on different datasets of programs and bugs. The lack of a commonly used benchmark limits the ability to perform fair and unbiased comparisons for assessing the efficacy of new techniques. To fill this gap, we propose BugsJS, a benchmark of 453 real, manually validated JavaScript bugs from 10 popular JavaScript server-side programs, comprising 444k LOC in total. Each bug is accompanied by its bug report, the test cases that detect it, as well as the patch that fixes it. BugsJS features a rich interface for accessing the faulty and fixed versions of the programs and executing the corresponding test cases, which facilitates conducting highly reproducible empirical studies and comparisons of JavaScript analysis and testing tools.},
  keywords = {benchmark, Bug database, BugsJS, JavaScript, literature survey, real bugs, reproducibility, select:quality},
  doi = {10.1109/ICST.2019.00019},
  url = {https://ieeexplore.ieee.org/document/8730197/authors$\#$authors},
  author = {P{\'e}ter Gyimesi and B{\'e}la Vancsics and Andrea Stocco and Davood Mazinanian and {\'A}rp{\'a}d Besz{\'e}des and Rudolf Ferenc and Ali Mesbah}
}

@article{16564,
  title = {Differences Between a Static and a Dynamic Test-to-Code Traceability Recovery Method},
  journal = {Software Quality Journal},
  volume = {27},
  year = {2018},
  month = {06/2019},
  pages = {797-822},
  abstract = {Recovering test-to-code traceability links may be required in virtually every phase of development. This task might seem simple for unit tests thanks to two fundamental unit testing guidelines: isolation (unit tests should exercise only a single unit) and separation (they should be placed next to this unit). However, practice shows that recovery may be challenging because the guidelines typically cannot be fully followed. Furthermore, previous works have already demonstrated that fully automatic test-to-code traceability recovery for unit tests is virtually impossible in the general case. In this work, we propose a semi-automatic method for this task, which is based on computing traceability links using static and dynamic approaches, comparing their results, and presenting the discrepancies to the user, who then determines the final traceability links based on the differences and contextual information. We define a set of discrepancy patterns, which can help the user in this task. Additional outcomes of analyzing the discrepancies are structural unit testing issues and related refactoring suggestions. For the static test-to-code traceability, we rely on the physical code structure, while for the dynamic one, we use code coverage information. In both cases, we compute combined test and code clusters, which represent sets of mutually traceable elements.
  We also present an empirical study of the method involving eight non-trivial open-source Java systems.},
  keywords = {Code coverage, refactoring, select:quality, Structural test smells, Test-to-code traceability, Traceability link recovery, Unit testing},
  doi = {10.1007/s11219-018-9430-x},
  url = {https://link.springer.com/article/10.1007/s11219-018-9430-x},
  author = {Tam{\'a}s Gergely and Gerg{\H o} Balogh and Ferenc Horv{\'a}th and B{\'e}la Vancsics and {\'A}rp{\'a}d Besz{\'e}des and Tibor Gyim{\'o}thy}
}