JACoW is a publisher based in Geneva, Switzerland; through it, an international collaboration of editors publishes the proceedings of accelerator conferences held around the world.
@inproceedings{scheinker:napac2022-frxe1,
  author    = {Scheinker, A. and Roussel, R. J.},
  title     = {{Bayesian} Algorithms for Practical Accelerator Control and Adaptive Machine Learning for Time-Varying Systems},
  booktitle = {Proc. 5th Int. Particle Accel. Conf. (NAPAC'22)},
  pages     = {921--926},
  eid       = {FRXE1},
  language  = {english},
  keywords  = {network, controls, feedback, experiment, electron},
  venue     = {Albuquerque, NM, USA},
  series    = {International Particle Accelerator Conference},
  number    = {5},
  publisher = {JACoW Publishing, Geneva, Switzerland},
  month     = oct,
  year      = {2022},
  issn      = {2673-7000},
  isbn      = {978-3-95450-232-5},
  doi       = {10.18429/JACoW-NAPAC2022-FRXE1},
  url       = {https://jacow.org/napac2022/papers/frxe1.pdf},
  abstract  = {Particle accelerators are complicated machines with thousands of coupled time varying components. The electromagnetic fields of accelerator devices such as magnets and RF cavities drift and are uncertain due to external disturbances, vibrations, temperature changes, and hysteresis. Accelerated charged particle beams are complex objects with 6D phase space dynamics governed by collective effects such as space charge forces, coherent synchrotron radiation, and whose initial phase space distributions change in unexpected and difficult to measure ways. This two-part tutorial presents recent developments in Bayesian methods and adaptive machine learning (ML) techniques for accelerators. Part 1: We introduce Bayesian control algorithms, and we describe how these algorithms can be customized to solve practical accelerator specific problems, including online characterization and optimization. Part 2: We give an overview of adaptive ML (AML) combining adaptive model-independent feedback within physics-informed ML architectures to make ML tools robust to time-variation (distribution shift) and to enable their use further beyond the span of the training data without relying on re-training.},
}