@article{BueckerHotiRose2024,
  author   = {B{\"u}cker, Michael and Hoti, Kreshnik and Rose, Olaf},
  title    = {Artificial intelligence to assist decision-making on pharmacotherapy: A feasibility study},
  journal  = {Exploratory Research in Clinical and Social Pharmacy},
  volume   = {15},
  issn     = {2667-2766},
  doi      = {10.25974/fhms-18142},
  url      = {http://nbn-resolving.de/urn:nbn:de:hbz:836-opus-181429},
  pages    = {100491},
  year     = {2024},
  abstract = {Background Artificial intelligence (AI) has the capability to analyze vast amounts of data and has been applied in various healthcare sectors. However, its effectiveness in aiding pharmacotherapy decision-making remains uncertain due to the intricate, patient-specific, and dynamic nature of this field. Objective This study sought to investigate the potential of AI in guiding pharmacotherapy decisions using clinical data such as diagnoses, laboratory results, and vital signs obtained from routine patient care. Methods Data of a previous study on medication therapy optimization was updated and adapted for the purpose of this study. Analysis was conducted using R software along with the tidymodels extension packages. The dataset was split into 74\% for training and 26\% for testing. Decision trees were selected as the primary model due to their simplicity, transparency, and interpretability. To prevent overfitting, bootstrapping techniques were employed, and hyperparameters were fine-tuned. Performance metrics such as areas under the curve and accuracies were computed. Results The study cohort comprised 101 elderly patients with multiple diagnoses and complex medication regimens. The AI model demonstrated prediction accuracies ranging from 38\% to 100\% for various cardiovascular drug classes. Laboratory data and vital signs could not be interpreted, as the effect and dependence were unclear for the model. The study revealed that the issue of AI lag time in responding to sudden changes could be addressed by manually adjusting decision trees, a task not feasible with neural networks. Conclusion In conclusion, the AI model exhibited promise in recommending appropriate medications for individual patients. While the study identified several obstacles during model development, most were successfully resolved. Future AI studies need to include the drug effect, not only the drug, if laboratory data is part of the decision. This could assist with interpreting their potential relationship. Human oversight and intervention remain essential for an AI-driven pharmacotherapy decision support system to ensure safe and effective patient care.},
  language = {en},
}

@article{BueckerSzepannekGosiewskaetal.2021,
  author   = {B{\"u}cker, Michael and Szepannek, Gero and Gosiewska, Alicja and Biecek, Przemyslaw},
  title    = {Transparency, Auditability and {eXplainability} of Machine Learning Models in Credit Scoring},
  journal  = {Journal of the Operational Research Society},
  doi      = {10.1080/01605682.2021.1922098},
  year     = {2021},
  language = {en},
}

@misc{BueckerSzepannekGosiewskaetal.2020,
  author        = {B{\"u}cker, Michael and Szepannek, Gero and Gosiewska, Alicja and Biecek, Przemyslaw},
  title         = {Transparency, Auditability and {eXplainability} of Machine Learning Models in Credit Scoring},
  eprint        = {2009.13384},
  archiveprefix = {arXiv},
  pages         = {1--30},
  year          = {2020},
  abstract      = {A major requirement for credit scoring models is to provide a maximally accurate risk prediction. Additionally, regulators demand these models to be transparent and auditable. Thus, in credit scoring, very simple predictive models such as logistic regression or decision trees are still widely used and the superior predictive power of modern machine learning algorithms cannot be fully leveraged. Significant potential is therefore missed, leading to higher reserves or more credit defaults. This paper works out different dimensions that have to be considered for making credit scoring models understandable and presents a framework for making ``black box'' machine learning models transparent, auditable and explainable. Following this framework, we present an overview of techniques, demonstrate how they can be applied in credit scoring and how results compare to the interpretability of score cards. A real world case study shows that a comparable degree of interpretability can be achieved while machine learning techniques keep their ability to improve predictive power.},
  language      = {en},
}

@article{HoopsBuecker2014,
  author   = {Hoops, Christian and B{\"u}cker, Michael},
  title    = {Determinants, Moderators and Consequences of Organizational Interaction Orientation},
  journal  = {Journal of Entrepreneurship Management and Innovation},
  volume   = {9},
  number   = {4},
  pages    = {73--100},
  year     = {2014},
  language = {en},
}

@article{BueckervanKampenKraemer2013,
  author   = {B{\"u}cker, Michael and van Kampen, Maarten and Kr{\"a}mer, Walter},
  title    = {Reject inference in consumer credit scoring with nonignorable missing data},
  journal  = {Journal of Banking \& Finance},
  volume   = {37},
  number   = {3},
  doi      = {10.1016/j.jbankfin.2012.11.002},
  pages    = {1040--1045},
  year     = {2013},
  language = {en},
}

@article{BueckerKraemerArnold2012,
  author   = {B{\"u}cker, Michael and Kr{\"a}mer, Walter and Arnold, Matthias},
  title    = {A {Hausman} test for non-ignorability},
  journal  = {Economics Letters},
  volume   = {114},
  number   = {1},
  doi      = {10.1016/j.econlet.2011.08.025},
  pages    = {23--25},
  year     = {2012},
  language = {en},
}

@article{KraemerBuecker2011,
  author   = {Kr{\"a}mer, Walter and B{\"u}cker, Michael},
  title    = {Probleme des Qualit{\"a}tsvergleichs von Kreditausfallprognosen},
  journal  = {AStA Wirtschafts- und Sozialstatistisches Archiv},
  volume   = {5},
  number   = {1},
  doi      = {10.1007/s11943-011-0096-0},
  pages    = {39--58},
  year     = {2011},
  language = {de},
}