@article{16565,
  title    = {Code Coverage Differences of Java Bytecode and Source Code},
  journal  = {Software Quality Journal},
  volume   = {27},
  year     = {2019},
  month    = {mar},
  pages    = {79-123},
  abstract = {Many software testing fields, like white-box testing, test case generation, test prioritization, and fault localization, depend on code coverage measurement. If used as an overall completeness measure, the minor inaccuracies of coverage data reported by a tool do not matter that much; however, in certain situations, they can lead to serious confusion. For example, a code element that is falsely reported as covered can introduce false confidence in the test. This work investigates code coverage measurement issues for the Java programming language. For Java, the prevalent approach to code coverage measurement is bytecode instrumentation, due to its various benefits over source code instrumentation. As we have experienced, bytecode instrumentation-based code coverage tools produce different results than source code instrumentation-based ones in terms of which items are reported as covered. We report on an empirical study that compares the code coverage results provided by tools using the two instrumentation types for Java coverage measurement at the method level. In particular, we want to find out how inaccurate a bytecode instrumentation approach is compared to a source code instrumentation method. The differences are systematically investigated both in quantitative terms (how much the outputs differ) and in qualitative terms (what the causes for the differences are). In addition, the impact on test prioritization and test suite reduction{\textemdash}a possible application of coverage measurement{\textemdash}is investigated in more detail as well.},
  keywords = {Code coverage, Coverage tools, Empirical study, Java bytecode instrumentation, Source code instrumentation, White-box testing},
  doi      = {10.1007/s11219-017-9389-z},
  url      = {https://link.springer.com/article/10.1007/s11219-017-9389-z\#citeas},
  author   = {Ferenc Horv{\'a}th and Tam{\'a}s Gergely and {\'A}rp{\'a}d Besz{\'e}des and D{\'a}vid Tengeri and Gerg{\H o} Balogh and Tibor Gyimothy}
}

@article{Kicsi:JSS:2019:SPL-adoption,
  title   = {Feature Analysis using Information Retrieval, Community Detection and Structural Analysis Methods in Product Line Adoption},
  journal = {Journal of Systems and Software},
  volume  = {155},
  year    = {2019},
  month   = {sep},
  pages   = {70-90},
  doi     = {10.1016/j.jss.2019.05.001},
  author  = {Andr{\'a}s Kicsi and Viktor Csuvik and L{\'a}szl{\'o} Vid{\'a}cs and Ferenc Horv{\'a}th and {\'A}rp{\'a}d Besz{\'e}des and Tibor Gyimothy and Ferenc Kocsis}
}

@conference{Horvath:IBF:2019:IFL,
  title        = {A New Interactive Fault Localization Method with Context Aware User Feedback},
  booktitle    = {Proceedings of the First International Workshop on Intelligent Bug Fixing (IBF 2019)},
  series       = {IBF 2019},
  year         = {2019},
  month        = {feb},
  pages        = {23-28},
  publisher    = {IEEE},
  organization = {IEEE},
  address      = {Passau, Germany},
  isbn         = {978-1-7281-1809-3},
  doi          = {10.1109/IBF.2019.8665415},
  author       = {Ferenc Horv{\'a}th and Victor Schnepper Lacerda and {\'A}rp{\'a}d Besz{\'e}des and L{\'a}szl{\'o} Vid{\'a}cs and Tibor Gyimothy}
}

@article{16564,
  title    = {Differences Between a Static and a Dynamic Test-to-Code Traceability Recovery Method},
  journal  = {Software Quality Journal},
  volume   = {27},
  year     = {2019},
  month    = {jun},
  pages    = {797-822},
  abstract = {Recovering test-to-code traceability links may be
    required in virtually every phase of development. This task might seem simple for unit tests thanks to two fundamental unit testing guidelines: isolation (unit tests should exercise only a single unit) and separation (they should be placed next to this unit). However, practice shows that recovery can be challenging because the guidelines typically cannot be fully followed. Furthermore, previous work has already demonstrated that fully automatic test-to-code traceability recovery for unit tests is virtually impossible in the general case. In this work, we propose a semi-automatic method for this task, which computes traceability links using static and dynamic approaches, compares their results, and presents the discrepancies to the user, who determines the final traceability links based on the differences and contextual information. We define a set of discrepancy patterns that can help the user in this task. Additional outcomes of analyzing the discrepancies are structural unit testing issues and related refactoring suggestions. For the static test-to-code traceability, we rely on the physical code structure, while for the dynamic one, we use code coverage information. In both cases, we compute combined test and code clusters that represent sets of mutually traceable elements. We also present an empirical study of the method involving eight non-trivial open-source Java systems.},
  keywords = {Code coverage, Refactoring, Structural test smells, Test-to-code traceability, Traceability link recovery, Unit testing},
  doi      = {10.1007/s11219-018-9430-x},
  url      = {https://link.springer.com/article/10.1007/s11219-018-9430-x},
  author   = {Tam{\'a}s Gergely and Gerg{\H o} Balogh and Ferenc Horv{\'a}th and B{\'e}la Vancsics and {\'A}rp{\'a}d Besz{\'e}des and Tibor Gyimothy}
}