<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article>
<article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:ali="http://www.niso.org/schemas/ali/1.0/" article-type="research-article" dtd-version="1.2" xml:lang="en"><front><journal-meta><journal-id journal-id-type="publisher-id">Discrete and Continuous Models and Applied Computational Science</journal-id><journal-title-group><journal-title xml:lang="en">Discrete and Continuous Models and Applied Computational Science</journal-title><trans-title-group xml:lang="ru"><trans-title>Discrete and Continuous Models and Applied Computational Science</trans-title></trans-title-group></journal-title-group><issn publication-format="print">2658-4670</issn><issn publication-format="electronic">2658-7149</issn><publisher><publisher-name xml:lang="en">Peoples' Friendship University of Russia named after Patrice Lumumba (RUDN University)</publisher-name></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">47503</article-id><article-id pub-id-type="doi">10.22363/2658-4670-2025-33-4-374-388</article-id><article-id pub-id-type="edn">HZSSDU</article-id><article-categories><subj-group subj-group-type="toc-heading" xml:lang="en"><subject>Computer Science</subject></subj-group><subj-group subj-group-type="toc-heading" xml:lang="ru"><subject>Информатика и вычислительная техника</subject></subj-group><subj-group subj-group-type="article-type"><subject>Research Article</subject></subj-group></article-categories><title-group><article-title xml:lang="en">Adaptive neural network method for multidimensional integration in arbitrary subdomains</article-title><trans-title-group xml:lang="ru"><trans-title>Адаптивный нейросетевой метод многомерного интегрирования для произвольных подобластей</trans-title></trans-title-group></title-group><contrib-group><contrib contrib-type="author"><contrib-id 
contrib-id-type="orcid">https://orcid.org/0000-0002-9229-2535</contrib-id><name-alternatives><name xml:lang="en"><surname>Shcherbak</surname><given-names>Margarita R.</given-names></name><name xml:lang="ru"><surname>Щербак</surname><given-names>М. Р.</given-names></name></name-alternatives><address><country country="RU">Russian Federation</country></address><bio xml:lang="en"><p>Student of Department of Computational Mathematics and Artificial Intelligence </p></bio><email>1032216537@rudn.ru</email><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-3918-3620</contrib-id><name-alternatives><name xml:lang="en"><surname>Abdullina</surname><given-names>Laysan R.</given-names></name><name xml:lang="ru"><surname>Абдуллина</surname><given-names>Л.  Р.</given-names></name></name-alternatives><address><country country="RU">Russian Federation</country></address><bio xml:lang="en"><p>Student of Department of Computational Mathematics and Artificial Intelligence </p></bio><email>1032216538@rudn.ru</email><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><contrib-id contrib-id-type="orcid">https://orcid.org/0000-0002-5321-9650</contrib-id><contrib-id contrib-id-type="scopus">57201380251</contrib-id><name-alternatives><name xml:lang="en"><surname>Salpagarov</surname><given-names>Soltan I.</given-names></name><name xml:lang="ru"><surname>Салпагаров</surname><given-names>С. 
И.</given-names></name></name-alternatives><address><country country="RU">Russian Federation</country></address><bio xml:lang="en"><p>Candidate of Physical and Mathematical Sciences, Associate Professor of Department of Computational Mathematics and Artificial Intelligence</p></bio><email>salpagarov-si@rudn.ru</email><xref ref-type="aff" rid="aff1"/></contrib><contrib contrib-type="author"><contrib-id contrib-id-type="orcid">https://orcid.org/0009-0003-5906-9993</contrib-id><name-alternatives><name xml:lang="en"><surname>Fedorishchev</surname><given-names>Vyacheslav M.</given-names></name><name xml:lang="ru"><surname>Федорищев</surname><given-names>В.  М.</given-names></name></name-alternatives><address><country country="RU">Russian Federation</country></address><bio xml:lang="en"><p>Student of Department of Computational Mathematics and Artificial Intelligence </p></bio><email>1142230295@rudn.ru</email><xref ref-type="aff" rid="aff1"/></contrib></contrib-group><aff-alternatives id="aff1"><aff><institution xml:lang="en">RUDN University</institution></aff><aff><institution xml:lang="ru">Российский университет дружбы народов</institution></aff></aff-alternatives><pub-date date-type="pub" iso-8601-date="2025-12-07" publication-format="electronic"><day>07</day><month>12</month><year>2025</year></pub-date><volume>33</volume><issue>4</issue><issue-title xml:lang="en">VOL 33, No4 (2025)</issue-title><issue-title xml:lang="ru">ТОМ 33, №4 (2025)</issue-title><fpage>374</fpage><lpage>388</lpage><history><date date-type="received" iso-8601-date="2025-12-06"><day>06</day><month>12</month><year>2025</year></date></history><permissions><copyright-statement xml:lang="en">Copyright © 2025, Shcherbak M.R., Abdullina L.R., Salpagarov S.I., Fedorishchev V.M.</copyright-statement><copyright-statement xml:lang="ru">Copyright © 2025, Щербак М.Р., Абдуллина Л.Р., Салпагаров С.И., Федорищев В.М.</copyright-statement><copyright-year>2025</copyright-year><copyright-holder 
xml:lang="en">Shcherbak M.R., Abdullina L.R., Salpagarov S.I., Fedorishchev V.M.</copyright-holder><copyright-holder xml:lang="ru">Щербак М.Р., Абдуллина Л.Р., Салпагаров С.И., Федорищев В.М.</copyright-holder><ali:free_to_read xmlns:ali="http://www.niso.org/schemas/ali/1.0/"/><license><ali:license_ref xmlns:ali="http://www.niso.org/schemas/ali/1.0/">https://creativecommons.org/licenses/by-nc/4.0</ali:license_ref></license></permissions><self-uri xlink:href="https://journals.rudn.ru/miph/article/view/47503">https://journals.rudn.ru/miph/article/view/47503</self-uri><abstract xml:lang="en"><p>Multidimensional integration is a fundamental problem in computational mathematics with numerous applications in physics, engineering, and data science. Traditional numerical methods such as Gauss–Legendre quadrature [1] and Monte Carlo techniques face significant challenges in high-dimensional spaces due to the curse of dimensionality, often requiring substantial computational resources and suffering from accuracy degradation. This study proposes an adaptive neural network-based method for efficient multidimensional integration over arbitrary subdomains. The approach optimizes training sample composition through a balancing parameter $\rho $, which controls the proportion of points generated via a Metropolis–Hastings inspired method versus uniform sampling. This enables the neural network to effectively capture complex integrand behaviors, particularly in regions with sharp variations. A key innovation of the method is its ``train once, integrate anywhere'' capability: a single neural network trained on a large domain can subsequently compute integrals over any arbitrary subdomain without retraining, significantly reducing computational overhead. Experiments were conducted on three function types---quadratic, Corner Peak, and sine of sum of squares---across dimensions 2D to 6D. Integration accuracy was evaluated using the Correct Digits (CD) metric. 
Results show that the neural network method achieves comparable or superior accuracy to traditional methods (Gauss–Legendre, Monte Carlo, Halton) for complex functions, while substantially reducing computation time. Optimal $\rho $ ranges were identified: 0.0--0.2 for smooth functions, and 0.3--0.5 for functions with sharp features. In multidimensional scenarios (4D, 6D), the method demonstrates stability at $\rho = 0.2\text {--}0.6$, outperforming stochastic methods though slightly less accurate than Latin hypercube sampling [2]. The proposed method offers a scalable, efficient alternative to classical integration techniques, particularly beneficial in high-dimensional settings and applications requiring repeated integration over varying subdomains.</p></abstract><trans-abstract xml:lang="ru"><p>Многомерное интегрирование является фундаментальной задачей вычислительной математики, имеющей многочисленные приложения в физике и инженерии. Традиционные численные методы, такие как квадратура Гаусса--Лежандра и методы Монте-Карло, сталкиваются со значительными трудностями в пространствах высокой размерности из-за «проклятия размерности»: они требуют больших вычислительных ресурсов и часто теряют точность. В данной работе предлагается адаптивный метод многомерного интегрирования, основанный на нейронной сети, для эффективного вычисления интегралов по произвольным подобластям. Подход оптимизирует состав обучающей выборки с помощью параметра балансировки $\rho $, который регулирует долю точек, сгенерированных методом, использующим модификацию алгоритма Метрополиса--Гастингса, по сравнению с равномерным выбором. Это позволяет нейронной сети эффективно определять сложное поведение подынтегральной функции, особенно в областях с резкими изменениями. 
Ключевым элементом данного метода является принцип «обучи один раз --- интегрируй где угодно»: одна нейронная сеть, обученная на большом домене, может впоследствии вычислять интегралы на любых произвольных подобластях без повторного обучения, что значительно снижает вычислительные затраты. Эксперименты проведены на трёх типах функций --- квадратичной, Corner Peak и синусе суммы квадратов --- в размерностях от 2 до 6. Точность интегрирования оценивалась с помощью метрики Correct Digits (CD). Результаты показывают, что наш метод обеспечивает сравнимую или более высокую точность по сравнению с традиционными методами (Гаусс--Лежандр, Монте-Карло, Халтона) для сложных функций, при этом существенно сокращая время вычислений. Оптимальные диапазоны $\rho $ составляют 0.0--0.2 для гладких функций и 0.3--0.5 для функций с резкими особенностями. В многомерных случаях (4D,6D) метод демонстрирует устойчивость при $\rho = 0.2\text {--}0.6$, превосходя стохастические методы, хотя и немного уступая латинскому гиперкубическому выбору. Предложенный метод представляет собой масштабируемую и эффективную альтернативу классическим методам интегрирования, особенно полезную в задачах высокой размерности и в приложениях, требующих многократного вычисления интегралов на различных подобластях.</p></trans-abstract><kwd-group xml:lang="en"><kwd>neural network integration</kwd><kwd>adaptive data generation</kwd><kwd>Levenberg--Marquardt optimization</kwd><kwd>multidimensional integrals</kwd></kwd-group><kwd-group xml:lang="ru"><kwd>нейросетевое интегрирование</kwd><kwd>адаптивная генерация данных</kwd><kwd>оптимизация Левенберга--Марквардта</kwd><kwd>многомерные интегралы</kwd></kwd-group><funding-group/></article-meta><fn-group/></front><body></body><back><ref-list><ref id="B1"><label>1.</label><mixed-citation>Press, W. H., Teukolsky, S. A., Vetterling, W. T. &amp; Flannery, B. P. 
Numerical Recipes: The Art of Scientific Computing 3rd (Cambridge University Press, Cambridge, UK, 2007).</mixed-citation></ref><ref id="B2"><label>2.</label><mixed-citation>McKay, M. D., Beckman, R. J. &amp; Conover, W. J. A Comparison of Three Methods for Selecting Values of Input Variables in the Analysis of Output from a Computer Code. Technometrics 21, 239–245. doi:10.1080/00401706.1979.10489755 (1979).</mixed-citation></ref><ref id="B3"><label>3.</label><mixed-citation>Bassi, H., Zhu, Y., Liang, S., Yin, J., Reeves, C. C., Vlček, V. &amp; Yang, C. Learning nonlinear integral operators via recurrent neural networks and its application in solving integro-differential equations. Machine Learning with Applications 15, 100524. doi:10.1016/j.mlwa.2023.100524 (Mar. 2024).</mixed-citation></ref><ref id="B4"><label>4.</label><mixed-citation>Maître, D. &amp; Santos-Mateos, R. Multi-variable integration with a neural network. Journal of High Energy Physics 2023, 221. doi:10.1007/JHEP03(2023)221 (Mar. 2023).</mixed-citation></ref><ref id="B5"><label>5.</label><mixed-citation>Li, S., Huang, X., Wang, X., et al. A new reliability analysis approach with multiple correlation neural networks method. Soft Computing 27, 7449–7458. doi:10.1007/s00500-022-07685-6 (June 2023).</mixed-citation></ref><ref id="B6"><label>6.</label><mixed-citation>Subr, K. Q-NET: A Network for Low-dimensional Integrals of Neural Proxies. Computer Graphics Forum 40, 61–71. doi:10.1111/cgf.14341 (2021).</mixed-citation></ref><ref id="B7"><label>7.</label><mixed-citation>Beck, C., Becker, S., Cheridito, P., Jentzen, A. &amp; Neufeld, A. Deep Splitting Method for Parabolic PDEs. SIAM Journal on Scientific Computing 43, A3135–A3154. doi:10.1137/19M1297919 (2021).</mixed-citation></ref><ref id="B8"><label>8.</label><mixed-citation>Wan, M., Pan, Y. &amp; Zhang, Z. A Physics-Informed Neural Network Integration Framework for Efficient Dynamic Fracture Simulation in an Explicit Algorithm. 
Applied Sciences 15, 10336. doi:10.3390/app151910336 (2025).</mixed-citation></ref><ref id="B9"><label>9.</label><mixed-citation>Nowak, A., Kustal, D., Sun, H. &amp; Blaszczyk, T. Neural network approximation of the composition of fractional operators and its application to the fractional Euler-Bernoulli beam equation. Applied Mathematics and Computation 501, 129475. doi:10.1016/j.amc.2025.129475 (2025).</mixed-citation></ref><ref id="B10"><label>10.</label><mixed-citation>Brunner, K. J., Fuchert, G., de Amorim Resende, F. B. L., Knauer, J., Hirsch, M., Wolf, R. C. &amp; the W7-X Team. Auto-encoding quadrature components of modulated dispersion interferometers. Plasma Physics and Controlled Fusion 67. Special Issue on the 6th European Conference on Plasma Diagnostics (ECPD 2025), 105007. doi:10.1088/1361-6587/ae0a80 (Oct. 2025).</mixed-citation></ref><ref id="B11"><label>11.</label><mixed-citation>Saxena, S., Bastek, J.-H., Spinola, M., Gupta, P. &amp; Kochmann, D. M. GNN-assisted phase space integration with application to atomistics. Mechanics of Materials 182, 104681. doi:10.1016/j.mechmat.2023.104681 (July 2023).</mixed-citation></ref><ref id="B12"><label>12.</label><mixed-citation>Saz Ulibarrena, V., Horn, P., Portegies Zwart, S., Sellentin, E., Koren, B. &amp; Cai, M. X. A hybrid approach for solving the gravitational N-body problem with Artificial Neural Networks. Journal of Computational Physics 496, 112596. doi:10.1016/j.jcp.2023.112596 (Jan. 2024).</mixed-citation></ref><ref id="B13"><label>13.</label><mixed-citation>Hu, Z., Shukla, K., Karniadakis, G. E. &amp; Kawaguchi, K. Tackling the curse of dimensionality with physics-informed neural networks. Neural Networks 176, 106369. doi:10.1016/j.neunet.2024.106369 (Aug. 2024).</mixed-citation></ref><ref id="B14"><label>14.</label><mixed-citation>Cho, J., Nam, S., Yang, H., Yun, S.-B., Hong, Y. &amp; Park, E. 
Separable PINN: Mitigating the Curse of Dimensionality in Physics-Informed Neural Networks 2023.</mixed-citation></ref><ref id="B15"><label>15.</label><mixed-citation>Ayriyan, A., Grigorian, H. &amp; Papoyan, V. Sampling of Integrand for Integration Using Shallow Neural Network. Discrete and Continuous Models and Applied Computational Science 32, 38–47 (2024).</mixed-citation></ref><ref id="B16"><label>16.</label><mixed-citation>Metropolis, N., Rosenbluth, A. W., Rosenbluth, M. N., Teller, A. H. &amp; Teller, E. Equation of State Calculations by Fast Computing Machines. The Journal of Chemical Physics 21, 1087–1092. doi:10.1063/1.1699114 (1953).</mixed-citation></ref><ref id="B17"><label>17.</label><mixed-citation>Hastings, W. K. Monte Carlo sampling methods using Markov chains and their applications. Biometrika 57, 97–109. doi:10.1093/biomet/57.1.97 (Apr. 1970).</mixed-citation></ref><ref id="B18"><label>18.</label><mixed-citation>Lloyd, S. Using Neural Networks for Fast Numerical Integration and Optimization. IEEE Access 8, 84519–84531. doi:10.1109/ACCESS.2020.2991966 (2020).</mixed-citation></ref><ref id="B19"><label>19.</label><mixed-citation>Cybenko, G. Approximation by superpositions of a sigmoidal function. Mathematics of Control, Signals and Systems 2, 303–314. doi:10.1007/BF02551274 (Dec. 1989).</mixed-citation></ref><ref id="B20"><label>20.</label><mixed-citation>Marquardt, D. W. An Algorithm for Least-Squares Estimation of Nonlinear Parameters. Journal of the Society for Industrial and Applied Mathematics 11, 431–441. doi:10.1137/0111030 (June 1963).</mixed-citation></ref><ref id="B21"><label>21.</label><mixed-citation>Genz, A. A Package for Testing Multiple Integration Subroutines in Numerical Integration: Recent Developments, Software and Applications (eds Keast, P. &amp; Fairweather, G.) 337–340 (Springer, 1987). 
doi:10.1007/978-94-009-3889-2_33.</mixed-citation></ref><ref id="B22"><label>22.</label><mixed-citation>Anikina, A. et al. Structure and Features of the Software and Information Environment of the HybriLIT Heterogeneous Platform in Distributed Computer and Communication Networks (eds Vishnevsky, V. M., Samouylov, K. E. &amp; Kozyrev, D. V.) 444–457 (Springer Nature Switzerland, Cham, 2025). doi:10.1007/978-3-031-80853-1_33.</mixed-citation></ref><ref id="B23"><label>23.</label><mixed-citation>Abadi, M. et al. TensorFlow: Large-Scale Machine Learning on Heterogeneous Systems Software available from tensorflow.org. 2015.</mixed-citation></ref><ref id="B24"><label>24.</label><mixed-citation>Halton, J. H. On the efficiency of certain quasi-random sequences of points in evaluating multidimensional integrals. Numerische Mathematik 2, 84–90. doi:10.1007/BF01386213 (1960).</mixed-citation></ref></ref-list></back></article>
