% references.bib -- BibTeX bibliography database for intro2ddsp.github.io
% (text outside @entries is ignored by BibTeX)
@article{arik2018fast,
  author    = {Ar{\i}k, Sercan {\"O} and Jun, Heewoo and Diamos, Gregory},
  title     = {Fast spectrogram inversion using multi-head convolutional neural networks},
  journal   = {IEEE Signal Processing Letters},
  year      = {2018},
  volume    = {26},
  number    = {1},
  pages     = {94--98},
  publisher = {IEEE}
}
% review: added eprint/archiveprefix so the arXiv id lives in the proper fields
% (matches the caillon_rave_2021 / huang_singing_2023 pattern in this file)
@misc{barahona-rios_noisebandnet_2023,
  title         = {{{NoiseBandNet}}: {{Controllable Time-Varying Neural Synthesis}} of {{Sound Effects Using Filterbanks}}},
  shorttitle    = {{{NoiseBandNet}}},
  author        = {{Barahona-R{\'i}os}, Adri{\'a}n and Collins, Tom},
  year          = 2023,
  month         = jul,
  publisher     = {{arXiv}},
  number        = {arXiv:2307.08007},
  doi           = {10.48550/arXiv.2307.08007},
  eprint        = {2307.08007},
  primaryclass  = {cs, eess},
  archiveprefix = {arxiv}
}
% review: added DOI (preferred over bare URL) and upgraded url to https
@misc{caillon_rave_2021,
  title         = {{{RAVE}}: {{A}} Variational Autoencoder for Fast and High-Quality Neural Audio Synthesis},
  shorttitle    = {{{RAVE}}},
  author        = {Caillon, Antoine and Esling, Philippe},
  year          = 2021,
  month         = dec,
  doi           = {10.48550/arXiv.2111.05011},
  url           = {https://arxiv.org/abs/2111.05011},
  urldate       = {2022-03-08},
  note          = {{arXiv [Preprint]. Available at \url{https://doi.org/10.48550/arXiv.2111.05011} (Accessed 2022-03-08)}},
  eprint        = {2111.05011},
  primaryclass  = {cs, eess},
  archiveprefix = {arxiv}
}
@inproceedings{carney_tone_2021,
  author     = {Carney, Michelle and Li, Chong and Toh, Edwin and Yu, Ping and Engel, Jesse},
  title      = {Tone {{Transfer}}: {{In-Browser Interactive Neural Audio Synthesis}}},
  shorttitle = {Tone {{Transfer}}},
  booktitle  = {Joint Proceedings of the ACM IUI 2021 Workshops},
  year       = {2021}
}
@inproceedings{caspe_ddx7_2022,
  author    = {Caspe, Franco and McPherson, Andrew and Sandler, Mark},
  title     = {{{DDX7}}: {{Differentiable FM Synthesis}} of {{Musical Instrument Sounds}}},
  booktitle = {Proceedings of the 23rd International Society for Music Information Retrieval Conference},
  year      = {2022}
}
% review: month must use the predefined macro (may), not a quoted month name;
% braced SynthAX/JAX so sentence-casing styles keep their capitalisation
@inproceedings{cherep2023synthax,
  title     = {{SynthAX}: A Fast Modular Synthesizer in {JAX}},
  author    = {Cherep, Manuel and Singh, Nikhil},
  year      = 2023,
  month     = may,
  booktitle = {Audio Engineering Society Convention 155},
  url       = {http://www.aes.org/e-lib/browse.cfm?elib=22261}
}
% review: author surname corrected (Zoelzer was misspelled) and umlaut written as a
% BibTeX special character so classic BibTeX sorts/labels it correctly; {DAFX} braced
@book{dafx,
  title     = {{DAFX} - Digital Audio Effects},
  author    = {Z{\"o}lzer, Udo},
  year      = 2011,
  publisher = {John Wiley \& Sons}
}
@inproceedings{diaz_rigid-body_2022,
  author       = {Diaz, Rodrigo and Hayes, Ben and Saitis, Charalampos and Fazekas, Gy{\"o}rgy and Sandler, Mark},
  title        = {Rigid-Body Sound Synthesis with Differentiable Modal Resonators},
  booktitle    = {IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  year         = {2023},
  organization = {IEEE}
}
@inproceedings{engel_ddsp_2020,
  author     = {Engel, Jesse and Hantrakul, Lamtharn (Hanoi) and Gu, Chenjie and Roberts, Adam},
  title      = {{{DDSP}}: {{Differentiable Digital Signal Processing}}},
  shorttitle = {{{DDSP}}},
  booktitle  = {8th {{International Conference}} on {{Learning Representations}}},
  year       = {2020},
  month      = apr,
  urldate    = {2020-01-29}
}
% review: removed mid-word brace ({GANS}ynth breaks kerning; brace the whole word)
% and rewrote authors in unambiguous "Last, First" form
@inproceedings{engel_gansynth_2019,
  title     = {{GANSynth}: Adversarial Neural Audio Synthesis},
  author    = {Engel, Jesse and Agrawal, Kumar Krishna and Chen, Shuo and Gulrajani, Ishaan and Donahue, Chris and Roberts, Adam},
  year      = 2019,
  booktitle = {International Conference on Learning Representations},
  url       = {https://openreview.net/forum?id=H1xQVn09FX}
}
@inproceedings{engel_self-supervised_2020,
  author    = {Engel, Jesse and Swavely, Rigel and Hantrakul, Lamtharn Hanoi and Roberts, Adam and Hawthorne, Curtis},
  title     = {Self-supervised Pitch Detection by Inverse Audio Synthesis},
  booktitle = {ICML 2020 Workshop on Self-supervision in Audio and Speech},
  year      = {2020},
  url       = {https://openreview.net/forum?id=RlVTYWhsky7}
}
% review: braced {WaveNet} so sentence-casing styles do not print "wavenet"
@inproceedings{engel2017neural,
  title        = {Neural audio synthesis of musical notes with {WaveNet} autoencoders},
  author       = {Engel, Jesse and Resnick, Cinjon and Roberts, Adam and Dieleman, Sander and Norouzi, Mohammad and Eck, Douglas and Simonyan, Karen},
  year         = 2017,
  booktitle    = {International Conference on Machine Learning},
  pages        = {1068--1077},
  organization = {PMLR}
}
% review: braced the {LF} acronym so it survives sentence casing
@article{fant1995lf,
  title   = {The {LF}-model revisited. Transformations and frequency domain analysis},
  author  = {Fant, Gunnar},
  year    = 1995,
  journal = {Speech Trans. Lab. Q. Rep., Royal Inst. of Tech. Stockholm},
  volume  = {2},
  number  = {3},
  pages   = {40}
}
@article{gatys_neural_2015,
  author  = {Gatys, Leon A. and Ecker, Alexander S. and Bethge, Matthias},
  title   = {A {{Neural Algorithm}} of {{Artistic Style}}},
  journal = {Journal of Vision},
  year    = {2015},
  volume  = {16},
  number  = {12},
  pages   = {326},
  doi     = {10.1167/16.12.326}
}
% review: "journal = {arXiv preprint ...}" is an export anti-pattern; restructured
% as @misc with eprint fields, matching this file's other arXiv entries; {LPC} braced
@misc{golf,
  title         = {Singing Voice Synthesis Using Differentiable {LPC} and Glottal-Flow-Inspired Wavetables},
  author        = {Yu, Chin-Yun and Fazekas, Gy{\"o}rgy},
  year          = 2023,
  publisher     = {{arXiv}},
  number        = {arXiv:2306.17252},
  doi           = {10.48550/arXiv.2306.17252},
  eprint        = {2306.17252},
  archiveprefix = {arxiv}
}
@inproceedings{hayes_neural_2021,
  author    = {Hayes, Ben and Saitis, Charalampos and Fazekas, Gy{\"o}rgy},
  title     = {Neural {{Waveshaping Synthesis}}},
  booktitle = {Proceedings of the 22nd {{International Society}} for {{Music Information Retrieval Conference}}},
  year      = {2021},
  month     = nov,
  address   = {{Online}},
  copyright = {All rights reserved}
}
@misc{hayes_review_2023,
  author       = {Hayes, Ben and Shier, Jordie and Fazekas, Gy{\"o}rgy and McPherson, Andrew and Saitis, Charalampos},
  title        = {A {{Review}} of {{Differentiable Digital Signal Processing}} for {{Music}} \& {{Speech Synthesis}}},
  year         = {2023},
  month        = aug,
  publisher    = {{arXiv}},
  number       = {arXiv:2308.15422},
  doi          = {10.48550/arXiv.2308.15422},
  urldate      = {2023-09-05},
  eprint       = {2308.15422},
  primaryclass = {cs, eess}
}
@inproceedings{hayes2023sinusoidal,
  author       = {Hayes, Ben and Saitis, Charalampos and Fazekas, Gy{\"o}rgy},
  title        = {Sinusoidal Frequency Estimation by Gradient Descent},
  booktitle    = {ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  year         = {2023},
  pages        = {1--5},
  organization = {IEEE}
}
% review: fixed "Prerint" typo in the note and added the arXiv DOI already cited there
@misc{huang_singing_2023,
  title         = {The {{Singing Voice Conversion Challenge}} 2023},
  author        = {Huang, Wen-Chin and Violeta, Lester Phillip and Liu, Songxiang and Shi, Jiatong and Toda, Tomoki},
  year          = 2023,
  month         = jul,
  publisher     = {{arXiv}},
  number        = {arXiv:2306.14422},
  doi           = {10.48550/arXiv.2306.14422},
  urldate       = {2023-07-25},
  note          = {{arXiv} [Preprint]. Available at \url{https://doi.org/10.48550/arXiv.2306.14422} (Accessed 2023-07-25)},
  eprint        = {2306.14422},
  primaryclass  = {cs, eess},
  archiveprefix = {arxiv}
}
% review: dropped empty number field, escaped non-ASCII letters for classic BibTeX,
% and braced acronyms/proper nouns in the title
@article{jorda_performance_2019,
  title   = {Performance Evaluation of {cuDNN} Convolution Algorithms on {NVIDIA} {Volta} {GPUs}},
  author  = {Jord{\`a}, Marc and Valero-Lara, Pedro and Pe{\~n}a, Antonio J.},
  year    = 2019,
  journal = {IEEE Access},
  volume  = {7},
  pages   = {70461--70473},
  doi     = {10.1109/ACCESS.2019.2918851}
}
% review: un-double-braced the title (whole-title braces defeat style casing; brace
% only the acronyms) and rewrote authors in "Last, First" form
@inproceedings{juvela_gelp_2019,
  title     = {{GELP}: {GAN}-Excited Linear Prediction for Speech Synthesis from Mel-Spectrogram},
  author    = {Juvela, Lauri and Bollepalli, Bajibabu and Yamagishi, Junichi and Alku, Paavo},
  year      = 2019,
  booktitle = {Proc. Interspeech 2019},
  pages     = {694--698},
  doi       = {10.21437/Interspeech.2019-2008}
}
@inproceedings{kelly_speech_1962,
  author    = {Kelly, J. L. and Lochbaum, C. C.},
  title     = {Speech Synthesis},
  booktitle = {Proceedings of the Fourth International Congress on Acoustics},
  year      = {1962},
  month     = sep,
  address   = {Copenhagen},
  pages     = {1--4}
}
% review: CREPE is an acronym (Convolutional REpresentation for Pitch Estimation);
% restored its casing and braced it against style recasing
@inproceedings{kim2018crepe,
  title        = {{CREPE}: A convolutional representation for pitch estimation},
  author       = {Kim, Jong Wook and Salamon, Justin and Li, Peter and Bello, Juan Pablo},
  year         = 2018,
  booktitle    = {2018 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages        = {161--165},
  organization = {IEEE}
}
% review: braced {HiFi-GAN} in the title so sentence-casing styles keep its casing
@inproceedings{kong_hifi-gan_2020,
  title     = {{HiFi-GAN}: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis},
  author    = {Kong, Jungil and Kim, Jaehyeon and Bae, Jaekyoung},
  year      = 2020,
  booktitle = {Proceedings of the 34th International Conference on Neural Information Processing Systems},
  location  = {Vancouver, BC, Canada},
  publisher = {Curran Associates Inc.},
  address   = {Red Hook, NY, USA},
  series    = {NIPS'20},
  isbn      = 9781713829546,
  abstract  = {Several recent work on speech synthesis have employed generative adversarial networks (GANs) to produce raw waveforms. Although such methods improve the sampling efficiency and memory usage, their sample quality has not yet reached that of autoregressive and flow-based generative models. In this work, we propose HiFi-GAN, which achieves both efficient and high-fidelity speech synthesis. As speech audio consists of sinusoidal signals with various periods, we demonstrate that modeling periodic patterns of an audio is crucial for enhancing sample quality. A subjective human evaluation (mean opinion score, MOS) of a single speaker dataset indicates that our proposed method demonstrates similarity to human quality while generating 22.05 kHz high-fidelity audio 167.9 times faster than real-time on a single V100 GPU. We further show the generality of HiFi-GAN to the mel-spectrogram inversion of unseen speakers and end-to-end speech synthesis. Finally, a small footprint version of HiFi-GAN generates samples 13.4 times faster than real-time on CPU with comparable quality to an autoregressive counterpart.},
  articleno = 1428,
  numpages  = 12
}
% review: "journal = {arXiv preprint ...}" anti-pattern; restructured as @misc with
% eprint fields like this file's other arXiv entries; {DDSP-SFX} braced
@misc{liu2023ddsp,
  title         = {{DDSP-SFX}: Acoustically-guided sound effects generation with differentiable digital signal processing},
  author        = {Liu, Yunyi and Jin, Craig and Gunawan, David},
  year          = 2023,
  publisher     = {{arXiv}},
  number        = {arXiv:2309.08060},
  doi           = {10.48550/arXiv.2309.08060},
  eprint        = {2309.08060},
  archiveprefix = {arxiv}
}
@article{makhoul1975linear,
  author    = {Makhoul, John},
  title     = {Linear prediction: A tutorial review},
  journal   = {Proceedings of the IEEE},
  year      = {1975},
  volume    = {63},
  number    = {4},
  pages     = {561--580},
  publisher = {IEEE}
}
@article{masuda_improving_2023,
  author  = {Masuda, Naotake and Saito, Daisuke},
  title   = {Improving {{Semi-Supervised Differentiable Synthesizer Sound Matching}} for {{Practical Applications}}},
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  year    = {2023},
  volume  = {31},
  pages   = {863--875},
  doi     = {10.1109/TASLP.2023.3237161},
  issn    = {2329-9304}
}
@inproceedings{nercessian_differentiable_2023,
  author    = {Nercessian, Shahan},
  title     = {Differentiable {{WORLD Synthesizer-Based Neural Vocoder With Application To End-To-End Audio Style Transfer}}},
  booktitle = {Audio {{Engineering Society Convention}} 154},
  year      = {2023},
  month     = may,
  url       = {https://www.aes.org/e-lib/browse.cfm?elib=22073},
  urldate   = {2023-06-21}
}
@inproceedings{nercessian2021lightweight,
  author       = {Nercessian, Shahan and Sarroff, Andy and Werner, Kurt James},
  title        = {Lightweight and interpretable neural modeling of an audio distortion effect using hyperconditioned differentiable biquads},
  booktitle    = {ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  year         = {2021},
  pages        = {890--894},
  organization = {IEEE}
}
% review: month held event dates ("10--15 Jul"), which breaks the month macro
% convention; month now uses the jul macro (event dates belong to the proceedings)
@inproceedings{oord_parallel_2018,
  title     = {Parallel {W}ave{N}et: Fast High-Fidelity Speech Synthesis},
  author    = {van den Oord, Aaron and Li, Yazhe and Babuschkin, Igor and Simonyan, Karen and Vinyals, Oriol and Kavukcuoglu, Koray and van den Driessche, George and Lockhart, Edward and Cobo, Luis and Stimberg, Florian and Casagrande, Norman and Grewe, Dominik and Noury, Seb and Dieleman, Sander and Elsen, Erich and Kalchbrenner, Nal and Zen, Heiga and Graves, Alex and King, Helen and Walters, Tom and Belov, Dan and Hassabis, Demis},
  year      = 2018,
  month     = jul,
  booktitle = {Proceedings of the 35th International Conference on Machine Learning},
  publisher = {PMLR},
  series    = {Proceedings of Machine Learning Research},
  volume    = {80},
  pages     = {3918--3926},
  url       = {https://proceedings.mlr.press/v80/oord18a.html},
  editor    = {Dy, Jennifer and Krause, Andreas},
  pdf       = {http://proceedings.mlr.press/v80/oord18a/oord18a.pdf},
  abstract  = {The recently-developed WaveNet architecture is the current state of the art in realistic speech synthesis, consistently rated as more natural sounding for many different languages than any previous system. However, because WaveNet relies on sequential generation of one audio sample at a time, it is poorly suited to today’s massively parallel computers, and therefore hard to deploy in a real-time production setting. This paper introduces Probability Density Distillation, a new method for training a parallel feed-forward network from a trained WaveNet with no significant difference in quality. The resulting system is capable of generating high-fidelity speech samples at more than 20 times faster than real-time, a 1000x speed up relative to the original WaveNet, and capable of serving multiple English and Japanese voices in a production setting.}
}
% review: an @inproceedings with booktitle = {Arxiv} is the wrong type; this is an
% arXiv preprint, restructured as @misc with eprint fields (file convention);
% {WaveNet} braced, names in "Last, First" form, non-ASCII escaped
@misc{oord_wavenet_2016,
  title         = {{WaveNet}: A Generative Model for Raw Audio},
  author        = {van den Oord, A{\"a}ron and Dieleman, Sander and Zen, Heiga and Simonyan, Karen and Vinyals, Oriol and Graves, Alexander and Kalchbrenner, Nal and Senior, Andrew and Kavukcuoglu, Koray},
  year          = 2016,
  publisher     = {{arXiv}},
  number        = {arXiv:1609.03499},
  doi           = {10.48550/arXiv.1609.03499},
  url           = {https://arxiv.org/abs/1609.03499},
  eprint        = {1609.03499},
  archiveprefix = {arxiv}
}
% review: removed empty volume/number fields (BibTeX warns on empty fields) and
% escaped the non-ASCII surname for classic BibTeX sorting
@inproceedings{pons_upsampling_2021,
  title     = {Upsampling Artifacts in Neural Audio Synthesis},
  author    = {Pons, Jordi and Pascual, Santiago and Cengarle, Giulio and Serr{\`a}, Joan},
  year      = 2021,
  booktitle = {ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
  pages     = {3005--3009},
  doi       = {10.1109/ICASSP39728.2021.9414913}
}
@inproceedings{renault_differentiable_2022,
  author    = {Renault, Lenny and Mignot, R{\'e}mi and Roebel, Axel},
  title     = {Differentiable {{Piano Model}} for {{Midi-to-Audio Performance Synthesis}}},
  booktitle = {Proceedings of the 25th {{International Conference}} on {{Digital Audio Effects}}},
  year      = {2022},
  address   = {{Vienna, Austria}},
  pages     = {8}
}
% review: removed empty number field and escaped the non-ASCII given name for
% classic BibTeX
@article{schulze-forster_unsupervised_2023,
  title   = {Unsupervised Music Source Separation Using Differentiable Parametric Source Models},
  author  = {Schulze-Forster, Kilian and Richard, Ga{\"e}l and Kelley, Liam and Doire, Clement S. J. and Badeau, Roland},
  year    = 2023,
  journal = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  volume  = {31},
  pages   = {1276--1289},
  doi     = {10.1109/TASLP.2023.3252272}
}
% review: url lacked a scheme ("www.jstor.org/...") which breaks \url/hyperref
@article{serra_spectral_1990,
  title      = {Spectral {{Modeling Synthesis}}: {{A Sound Analysis}}/{{Synthesis System Based}} on a {{Deterministic Plus Stochastic Decomposition}}},
  shorttitle = {Spectral {{Modeling Synthesis}}},
  author     = {Serra, Xavier and Smith, Julius},
  year       = 1990,
  journal    = {Computer Music Journal},
  volume     = {14},
  number     = {4},
  pages      = {12--24},
  doi        = {10.2307/3680788},
  issn       = {0148-9267},
  url        = {https://www.jstor.org/stable/3680788},
  urldate    = {2019-12-21}
}
% review: dropped the stray journal = {arXiv:...} field (wrong field for an
% @inproceedings) and the note that duplicated the issn field; the arXiv id now
% lives in eprint/archiveprefix
@inproceedings{shan_differentiable_2022,
  title         = {Differentiable {{Wavetable Synthesis}}},
  author        = {Shan, Siyuan and Hantrakul, Lamtharn and Chen, Jitong and Avent, Matt and Trevelyan, David},
  year          = 2022,
  month         = may,
  booktitle     = {{{ICASSP}} 2022 - 2022 {{IEEE International Conference}} on {{Acoustics}}, {{Speech}} and {{Signal Processing}} ({{ICASSP}})},
  pages         = {4598--4602},
  doi           = {10.1109/ICASSP43922.2022.9746940},
  issn          = {2379-190X},
  url           = {http://arxiv.org/abs/2111.10003},
  urldate       = {2022-03-12},
  eprint        = {2111.10003},
  archiveprefix = {arxiv}
}
@inproceedings{shier2023differentiable,
  author    = {Shier, Jordie and Caspe, Franco and Robertson, Andrew and Sandler, Mark and Saitis, Charalampos and McPherson, Andrew},
  title     = {Differentiable Modelling of Percussive Audio with Transient and Spectral Synthesis},
  booktitle = {Proceedings of the 10th Convention of the European Acoustics Association Forum Acusticum 2023},
  year      = {2023}
}
% review: the publisher URL was stored in address (which is the publisher's city);
% moved it to the url field and wrote the author in "Last, First" form
@book{smith_filters_2007,
  title     = {Introduction to Digital Filters with Audio Applications},
  author    = {Smith, Julius O.},
  year      = 2007,
  publisher = {W3K Publishing},
  url       = {http://www.w3k.org/books/},
  isbn      = {978-0-9745607-1-7}
}
@inproceedings{song_dspgan_2023,
  author     = {Song, Kun and Zhang, Yongmao and Lei, Yi and Cong, Jian and Li, Hanzhao and Xie, Lei and He, Gang and Bai, Jinfeng},
  title      = {{{DSPGAN}}: {{A Gan-Based Universal Vocoder}} for {{High-Fidelity TTS}} by {{Time-Frequency Domain Supervision}} from {{DSP}}},
  shorttitle = {{{DSPGAN}}},
  booktitle  = {{{ICASSP}} 2023 - 2023 {{IEEE International Conference}} on {{Acoustics}}, {{Speech}} and {{Signal Processing}} ({{ICASSP}})},
  year       = {2023},
  month      = jun,
  pages      = {1--5},
  doi        = {10.1109/ICASSP49357.2023.10095105}
}
% review: escaped the remaining raw Unicode in author names; the entry already uses
% escapes elsewhere (S{\"u}dholt), so this keeps the file classic-BibTeX safe
@inproceedings{sudholt_vocal_2023,
  title     = {Vocal Tract Area Estimation by Gradient Descent},
  author    = {S{\"u}dholt, David and C{\'a}mara, Mateo and Xu, Zhiyuan and Reiss, Joshua D.},
  year      = 2023,
  booktitle = {Proceedings of the 26th {{International Conference}} on {{Digital Audio Effects}}},
  address   = {{Copenhagen, Denmark}}
}
@inproceedings{turian_one_2021,
  author    = {Turian, Joseph and Shier, Jordie and Tzanetakis, George and McNally, Kirk and Henry, Max},
  title     = {One {{Billion Audio Sounds}} from {{GPU-enabled Modular Synthesis}}},
  booktitle = {Proceedings of the 23rd International Conference on Digital Audio Effects},
  year      = {2021}
}
% review: removed the machine-local Zotero "file" field (a path on one author's
% laptop has no place in a shared bibliography)
@inproceedings{wang_neural_2019,
  title     = {Neural {{Harmonic-plus-Noise Waveform Model}} with {{Trainable Maximum Voice Frequency}} for {{Text-to-Speech Synthesis}}},
  author    = {Wang, Xin and Yamagishi, Junichi},
  year      = 2019,
  month     = sep,
  booktitle = {10th {{ISCA Workshop}} on {{Speech Synthesis}} ({{SSW}} 10)},
  publisher = {{ISCA}},
  pages     = {1--6},
  doi       = {10.21437/SSW.2019-1},
  urldate   = {2023-07-04},
  abstract  = {Neural source-filter (NSF) models are deep neural networks that produce waveforms given input acoustic features. They use dilated-convolution-based neural filter modules to filter sinebased excitation for waveform generation, which is different from WaveNet and flow-based models. One of the NSF models, called harmonic-plus-noise NSF (h-NSF) model, uses separate pairs of source and neural filters to generate harmonic and noise waveform components. It is close to WaveNet in terms of speech quality while being superior in generation speed.}
}
@article{wang2019neural,
  author    = {Wang, Xin and Takaki, Shinji and Yamagishi, Junichi},
  title     = {Neural source-filter waveform models for statistical parametric speech synthesis},
  journal   = {IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  year      = {2019},
  volume    = {28},
  pages     = {402--415},
  publisher = {IEEE}
}
@inproceedings{wu_ddsp-based_2022,
  author    = {Wu, Da-Yi and Hsiao, Wen-Yi and Yang, Fu-Rong and Friedman, Oscar and Jackson, Warren and Bruzenak, Scott and Liu, Yi-Wen and Yang, Yi-Hsuan},
  title     = {{{DDSP-based Singing Vocoders}}: {{A New Subtractive-based Synthesizer}} and {{A Comprehensive Evaluation}}},
  booktitle = {Proceedings of the 23rd International Society for Music Information Retrieval Conference},
  year      = {2022},
  pages     = {76--83}
}
@inproceedings{wu_midi-ddsp_2022,
  author    = {Wu, Yusong and Manilow, Ethan and Deng, Yi and Swavely, Rigel and Kastner, Kyle and Cooijmans, Tim and Courville, Aaron and Huang, Cheng-Zhi Anna and Engel, Jesse},
  title     = {{{MIDI-DDSP}}: {{Detailed}} Control of Musical Performance via Hierarchical Modeling},
  booktitle = {International Conference on Learning Representations},
  year      = {2022},
  url       = {https://openreview.net/forum?id=UseMOjWENv}
}
@inproceedings{ye_nas-fm_2023,
  author    = {Ye, Zhen and Xue, Wei and Tan, Xu and Liu, Qifeng and Guo, Yike},
  title     = {{{NAS-FM}}: {{Neural Architecture Search}} for {{Tunable}} and {{Interpretable Sound Synthesis}} Based on {{Frequency Modulation}}},
  booktitle = {Proceedings of the Thirty-Second International Joint Conference on Artificial Intelligence},
  year      = {2023},
  pages     = {5869--5877},
  doi       = {10.24963/ijcai.2023/651}
}