@article {HKF17, title = {Empirical Evaluation of Software Maintainability Based on a Manually Validated Refactoring Dataset}, journal = {Information and Software Technology}, volume = {95}, year = {2018}, pages = {313{\textendash}327}, abstract = {Context: Refactoring is a technique for improving the internal structure of software systems. It has a solid theoretical background while also being used in development practice. However, we lack empirical research results on the real effect of code refactoring and its application. Objective: This paper presents a manually validated subset of a previously published dataset containing the refactorings extracted by the RefFinder tool, code metrics, and maintainability of 7 open-source systems. We found that RefFinder had around 27\% overall average precision on the subject systems, thus our manually validated subset has substantial added value. Using the dataset, we studied several aspects of the refactored and non-refactored source code elements (classes and methods), such as the differences in their maintainability and source code metrics. Method: We divided the source code elements into a group containing the refactored elements and a group with non-refactored elements. We analyzed the elements{\textquoteright} characteristics in these groups using correlation analysis, the Mann-Whitney U test, and effect size measures. Results: Source code elements subjected to refactorings had significantly lower maintainability than elements not affected by refactorings. Moreover, refactored elements had significantly higher size-related metrics, complexity, and coupling. These metrics also changed more significantly in the refactored elements. The results are mostly in line with our previous findings on the non-validated dataset, with the difference that clone metrics had no strong connection with refactoring. Conclusions: Compared to the preliminary analysis using the non-validated dataset, the manually validated dataset led to more significant results, which suggests that developers find targets for refactorings based on some internal quality properties of the source code, such as size, complexity, or coupling, but not clone-related metrics as reported in our previous studies. Developers do not just use these properties to identify refactoring targets, but also control them with refactorings.}, keywords = {Code refactoring, Empirical study, Manually validated empirical dataset, Software maintainability, Source code metrics}, issn = {0950-5849}, doi = {10.1016/j.infsof.2017.11.012}, url = {http://www.sciencedirect.com/science/article/pii/S0950584916303561}, author = {Heged{\H u}s, P{\'e}ter and K{\'a}d{\'a}r, Istv{\'a}n and Ferenc, Rudolf and Gyim{\'o}thy, Tibor} } @conference {KHF16c, title = {Assessment of the Code Refactoring Dataset Regarding the Maintainability of Methods}, booktitle = {Proceedings of the 16th International Conference on Computational Science and Its Applications (ICCSA 2016)}, year = {2016}, month = {jul}, pages = {610{\textendash}624}, publisher = {Springer International Publishing}, address = {Beijing, China}, abstract = {Code refactoring has a solid theoretical background while being used in development practice at the same time. However, previous works found conflicting results on the nature of code refactoring activities in practice. Both their application context and their impact on code quality need further examination. 
Our paper encourages the investigation of code refactorings in practice by providing an extensive open dataset of source code metrics and applied refactorings through several releases of 7 open-source systems. We have already demonstrated the practical value of the dataset by analyzing the quality attributes of the refactored source code classes and the values of source code metrics improved by those refactorings. In this paper, we go one step deeper and explore the effect of code refactorings at the level of methods. We found that, similarly to the class level, lower maintainability indeed triggers more code refactorings at the level of methods in practice, and these refactorings significantly decrease size, coupling, and clone metrics.}, keywords = {Code refactoring, Empirical study, Refactoring dataset, Software maintainability}, doi = {10.1007/978-3-319-42089-9_43}, url = {https://link.springer.com/chapter/10.1007\%2F978-3-319-42089-9_43}, author = {K{\'a}d{\'a}r, Istv{\'a}n and Heged{\H u}s, P{\'e}ter and Ferenc, Rudolf and Gyim{\'o}thy, Tibor} } @conference {KHF16, title = {A Code Refactoring Dataset and Its Assessment Regarding Software Maintainability}, booktitle = {Proceedings of the 23rd IEEE International Conference on Software Analysis, Evolution, and Reengineering (SANER 2016)}, year = {2016}, pages = {599{\textendash}603}, publisher = {IEEE Computer Society}, address = {Suita, Osaka, Japan}, abstract = {It is very common in various fields that there is a gap between theoretical results and their practical applications. This is true for code refactoring as well, which has a solid theoretical background while being used in development practice at the same time. However, more and more studies suggest that developers perform code refactoring entirely differently than the theory would suggest. Our paper encourages the further investigation of code refactorings in practice by providing an extensive open dataset of source code metrics and applied refactorings through several releases of 7 open-source systems. As a first step of processing this dataset, we examined the quality attributes of the refactored source code classes and the values of source code metrics improved by those refactorings. Our early results show that lower maintainability indeed triggers more code refactorings in practice, and these refactorings significantly decrease complexity, lines of code, coupling, and clone metrics. However, we observed a decrease in comment-related metrics in the refactored code.}, keywords = {Code refactoring, Empirical study, Software maintainability}, doi = {10.1109/SANER.2016.42}, url = {http://ieeexplore.ieee.org/document/7476680/}, author = {K{\'a}d{\'a}r, Istv{\'a}n and Heged{\H u}s, P{\'e}ter and Ferenc, Rudolf and Gyim{\'o}thy, Tibor} } @conference {KHF16b, title = {A Manually Validated Code Refactoring Dataset and Its Assessment Regarding Software Maintainability}, booktitle = {Proceedings of the 12th ACM International Conference on Predictive Models and Data Analytics in Software Engineering (PROMISE 2016)}, year = {2016}, month = {sep}, pages = {10:1{\textendash}10:4}, publisher = {ACM}, address = {Ciudad Real, Spain}, abstract = {Refactoring is a popular technique for improving the internal structure of software systems. It has a solid theoretical background while being used in development practice at the same time. However, we lack empirical research results on the real effect of code refactoring and the ways it is applied. 
This paper presents a manually validated dataset of applied refactorings, together with source code metrics and maintainability data, for 7 open-source systems. It is a subset of our previously published dataset containing the refactoring instances automatically extracted by the RefFinder tool. We found that RefFinder had around 27\% overall average precision on the subject systems, thus our new {\textendash} manually validated {\textendash} subset has substantial added value, allowing researchers to perform more accurate empirical investigations. Using this data, we were able to study whether refactorings were really triggered by poor maintainability of the code or by other aspects. The results show that source code elements subject to refactorings had significantly lower maintainability values (approximated by source code metric aggregation) than elements not affected by refactorings between two releases.}, keywords = {Code refactoring, Empirical study, Manually validated empirical dataset, Software maintainability}, doi = {10.1145/2972958.2972962}, url = {https://dl.acm.org/citation.cfm?doid=2972958.2972962}, author = {K{\'a}d{\'a}r, Istv{\'a}n and Heged{\H u}s, P{\'e}ter and Ferenc, Rudolf and Gyim{\'o}thy, Tibor} } @conference {KHF15, title = {Adding Constraint Building Mechanisms to a Symbolic Execution Engine Developed for Detecting Runtime Errors}, booktitle = {Proceedings of the 15th International Conference on Computational Science and Its Applications (ICCSA 2015)}, series = {Lecture Notes in Computer Science (LNCS)}, volume = {9159}, year = {2015}, month = {jun}, pages = {20{\textendash}35}, publisher = {Springer-Verlag}, address = {Banff, Alberta, Canada}, abstract = {Most of the runtime failures of a software system can be revealed only during test execution, which has a very high cost. The symbolic execution engine developed at the Software Engineering Department of the University of Szeged is able to detect runtime errors (such as null pointer dereference, bad array indexing, and division by zero) in Java programs without running the program in a real-life environment. In this paper we present a constraint system building mechanism that improves the accuracy of the runtime errors reported by this symbolic execution engine. We extend the original principles of symbolic execution by tracking the dependencies of the symbolic variables and substituting them with concrete values if the built constraint system unambiguously determines their value. The extended symbolic execution checker was also tested on real-life open-source systems.}, keywords = {Constraint system building, Java runtime errors, Software engineering, symbolic execution}, doi = {10.1007/978-3-319-21413-9_2}, url = {https://link.springer.com/chapter/10.1007\%2F978-3-319-21413-9_2}, author = {K{\'a}d{\'a}r, Istv{\'a}n and Heged{\H u}s, P{\'e}ter and Ferenc, Rudolf} } @article {KHF14, title = {Runtime Exception Detection in {J}ava Programs Using Symbolic Execution}, journal = {Acta Cybernetica}, volume = {21}, number = {3}, year = {2014}, pages = {331{\textendash}352}, abstract = {Most of the runtime failures of a software system can be revealed only during test execution, which has a very high cost. In Java programs, runtime failures are manifested as unhandled runtime exceptions. In this paper we present an approach and a tool for detecting runtime exceptions in Java programs without having to execute tests on the software. We use the symbolic execution technique to implement the approach. 
By executing the methods of the program symbolically, we can determine the execution branches that throw exceptions. Our algorithm is also able to generate concrete test inputs that cause the program to fail at runtime. We used the Symbolic PathFinder extension of Java PathFinder as the symbolic execution engine. Besides small code examples, we evaluated our algorithm on three open-source systems: jEdit, ArgoUML, and log4j. We found multiple errors in the log4j system that were also reported as real bugs in its bug tracking system.}, keywords = {Java runtime exception, rule checking, symbolic execution}, issn = {0324-721X}, doi = {10.14232/actacyb.21.3.2014.4}, url = {http://www.inf.u-szeged.hu/actacybernetica/edb/vol21n3/Kadar_2014_ActaCybernetica.xml}, author = {K{\'a}d{\'a}r, Istv{\'a}n and Heged{\H u}s, P{\'e}ter and Ferenc, Rudolf} } @conference {KHF13, title = {Runtime Exception Detection in {J}ava Programs Using Symbolic Execution}, booktitle = {Proceedings of the 13th Symposium on Programming Languages and Software Tools (SPLST 2013)}, year = {2013}, month = {aug}, pages = {215{\textendash}229}, publisher = {University of Szeged}, address = {Szeged, Hungary}, abstract = {Most of the runtime failures of a software system can be revealed only during test execution, which has a very high cost. In Java programs, runtime failures are manifested as unhandled runtime exceptions. In this paper we present an approach and a tool for detecting runtime exceptions in Java programs without having to execute tests on the software. We use the symbolic execution technique to implement the approach. By executing the methods of the program symbolically, we can determine the execution branches that throw exceptions. Our algorithm is also able to generate concrete test inputs that cause the program to fail at runtime. We used the Symbolic PathFinder extension of Java PathFinder as the symbolic execution engine. Besides small code examples, we evaluated our algorithm on three open-source systems: jEdit, ArgoUML, and log4j. We found multiple errors in the log4j system that were also reported as real bugs in its bug tracking system.}, keywords = {Java runtime exception, Java Virtual Machine, rule checking, symbolic execution}, author = {K{\'a}d{\'a}r, Istv{\'a}n and Heged{\H u}s, P{\'e}ter and Ferenc, Rudolf} }