Bio
Alana completed a B.Sc. in physics at the University of Montreal and an M.Sc. in medical physics at McGill University. She is currently pursuing a dual Ph.D. in physics at McGill University and the University of Bordeaux.
Current Projects
Development of a dynamic shielding brachytherapy applicator for the treatment of rectal cancer
2022
Weishaupt, Luca L; Vuong, Te; Thibodeau-Antonacci, Alana; Garant, A; Singh, KS; Miller, C; Martin, A; Enger, Shirin A.
A121 QUANTIFYING INTER-OBSERVER VARIABILITY IN THE SEGMENTATION OF RECTAL TUMORS IN ENDOSCOPY IMAGES AND ITS EFFECTS ON DEEP LEARNING Journal Article
In: Journal of the Canadian Association of Gastroenterology, vol. 5, no. Supplement_1, pp. 140–142, 2022.
@article{weishaupt2022a121,
title = {A121 QUANTIFYING INTER-OBSERVER VARIABILITY IN THE SEGMENTATION OF RECTAL TUMORS IN ENDOSCOPY IMAGES AND ITS EFFECTS ON DEEP LEARNING},
author = {Luca L Weishaupt and Te Vuong and Alana Thibodeau-Antonacci and A Garant and KS Singh and C Miller and A Martin and Shirin A. Enger},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Journal of the Canadian Association of Gastroenterology},
volume = {5},
number = {Supplement_1},
pages = {140--142},
publisher = {Oxford University Press US},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Thibodeau-Antonacci, Alana; Vuong, Te; Liontis, B; Rayes, F; Pande, S; Enger, Shirin A.
Development of a Novel MRI-Compatible Applicator for Intensity Modulated Rectal Brachytherapy Inproceedings
In: Medical Physics, vol. 49, no. 6, pp. E240–E240, Wiley, 2022.
@inproceedings{thibodeau2022development,
title = {Development of a Novel MRI-Compatible Applicator for Intensity Modulated Rectal Brachytherapy},
author = {Alana Thibodeau-Antonacci and Te Vuong and B Liontis and F Rayes and S Pande and Shirin A. Enger},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {MEDICAL PHYSICS},
volume = {49},
number = {6},
pages = {E240--E240},
organization = {WILEY 111 RIVER ST, HOBOKEN 07030-5774, NJ USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Thibodeau-Antonacci, Alana; Enger, Shirin A.; Bekerat, Hamed; Vuong, Te
Gafchromic film and scintillator detector measurements in phantom with a novel intensity-modulated brachytherapy endorectal shield Inproceedings
In: Medical Physics, vol. 49, no. 8, pp. 5688–5689, Wiley, 2022.
@inproceedings{thibodeau2022gafchromic,
title = {Gafchromic film and scintillator detector measurements in phantom with a novel intensity-modulated brachytherapy endorectal shield},
author = {Alana Thibodeau-Antonacci and Shirin A. Enger and Hamed Bekerat and Te Vuong},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {MEDICAL PHYSICS},
volume = {49},
number = {8},
pages = {5688--5689},
organization = {WILEY 111 RIVER ST, HOBOKEN 07030-5774, NJ USA},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Weishaupt, Luca L.; Vuong, Te; Thibodeau-Antonacci, Alana; Garant, A; Singh, K; Miller, C; Martin, A; Schmitt-Ulms, F; Enger, Shirin A.
PO-1325 Automated rectal tumor segmentation with inter-observer variability-based uncertainty estimates Journal Article
In: Radiotherapy and Oncology, vol. 170, pp. S1120–S1121, 2022.
@article{weishaupt2022po,
title = {PO-1325 Automated rectal tumor segmentation with inter-observer variability-based uncertainty estimates},
author = {Luca L. Weishaupt and Te Vuong and Alana Thibodeau-Antonacci and A Garant and K Singh and C Miller and A Martin and F Schmitt-Ulms and Shirin A. Enger},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {Radiotherapy and Oncology},
volume = {170},
pages = {S1120--S1121},
publisher = {Elsevier},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2021
Thibodeau-Antonacci, Alana
Canada Graduate Scholarship – Doctoral Program award
2021.
@award{Thibodeau-Antonacci2021d,
title = {Canada Graduate Scholarship – Doctoral Program},
author = {Alana Thibodeau-Antonacci},
url = {https://www.nserc-crsng.gc.ca/students-etudiants/pg-cs/cgsd-bescd_eng.asp},
year = {2021},
date = {2021-09-01},
organization = {NSERC},
keywords = {},
pubstate = {published},
tppubtype = {award}
}
Weishaupt, Luca L.; Thibodeau-Antonacci, Alana; Garant, Aurelie; Singh, Kelita; Miller, Corey; Vuong, Té; Enger, Shirin A.
Deep learning-based tumor segmentation of endoscopy images for rectal cancer patients Presentation
ESTRO Annual Meeting, 27.08.2021.
@misc{Weishaupt2021b,
title = {Deep learning-based tumor segmentation of endoscopy images for rectal cancer patients},
author = {Luca L. Weishaupt and Alana Thibodeau-Antonacci and Aurelie Garant and Kelita Singh and Corey Miller and Té Vuong and Shirin A. Enger},
url = {https://www.estro.org/Congresses/ESTRO-2021/610/posterdiscussion34-deep-learningforauto-contouring/3710/deeplearning-basedtumorsegmentationofendoscopyimag},
year = {2021},
date = {2021-08-27},
urldate = {2021-08-27},
abstract = {Purpose or Objective
The objective of this study was to develop an automated rectal tumor segmentation algorithm from endoscopy images. The algorithm will be used in a future multimodal treatment outcome prediction model. Currently, treatment outcome prediction models rely on manual segmentations of regions of interest, which are prone to inter-observer variability. To quantify this human error and demonstrate the feasibility of automated endoscopy image segmentation, we compare three deep learning architectures.
Material and Methods
A gastrointestinal physician (G1) segmented 550 endoscopy images of rectal tumors into tumor and non-tumor regions. To quantify the inter-observer variability, a second gastrointestinal physician (G2) contoured 319 of the images independently.
The 550 images and annotations from G1 were divided into training (408 images), validation (82 images), and testing (60 images) sets. Three deep learning architectures were trained: a fully convolutional neural network (FCN32), a U-Net, and a SegNet. These architectures have been used for robust medical image segmentation in previous studies.
All models were trained on a CPU supercomputing cluster. Data augmentation in the form of random image transformations, including scaling, rotation, shearing, Gaussian blurring, and noise addition, was used to improve the models' robustness.
The neural networks' output went through a final layer of noise removal and hole filling before evaluation. Finally, the segmentations from G2 and the neural networks' predictions were compared against the ground truth labels from G1.
Results
The FCN32, U-Net, and SegNet had average segmentation times of 0.77, 0.48, and 0.43 seconds per image, respectively. The average segmentation times per image for G1 and G2 were 10 and 8 seconds, respectively.
All the ground truth labels contained tumors, but G2 and the deep learning models did not always find tumors in the images. The scores were based on the agreement of tumor contours with G1’s ground truth and were thus only computed for images in which a tumor was found. The automated segmentation algorithms consistently achieved equal or better scores than G2's manual segmentations. G2's low F1/DICE and precision scores indicate poor agreement between the manual contours.
Conclusion
There is a need for robust and accurate segmentation algorithms for rectal tumor segmentation since manual segmentation of these tumors is susceptible to significant inter-observer variability. The deep learning-based segmentation algorithms proposed in this study are more efficient and achieved a higher agreement with our manual ground truth segmentations than a second expert annotator. Future studies will investigate how to train deep learning models on multiple ground truth annotations to prevent learning observer biases.},
howpublished = {ESTRO Annual Meeting},
keywords = {},
pubstate = {published},
tppubtype = {presentation}
}
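The abstract above mentions a post-processing step in which each network's binary output is cleaned by noise removal and hole filling before evaluation. Below is a minimal Python sketch of such a step, assuming scipy.ndimage morphology; the function name clean_mask and the number of opening iterations are illustrative assumptions, not details taken from the study.

# A minimal sketch of the mask clean-up described in the abstract:
# morphological noise removal followed by hole filling on a binary
# tumor mask. The function name and the opening iterations are
# illustrative assumptions, not details from the study.
import numpy as np
from scipy import ndimage

def clean_mask(mask: np.ndarray, opening_iterations: int = 2) -> np.ndarray:
    """Remove speckle noise and fill enclosed holes in a binary mask."""
    mask = mask.astype(bool)
    # Binary opening (erosion then dilation) deletes isolated noise
    # pixels smaller than the structuring element.
    structure = ndimage.generate_binary_structure(2, 1)
    opened = ndimage.binary_opening(mask, structure=structure,
                                    iterations=opening_iterations)
    # Hole filling makes each remaining predicted tumor region solid.
    return ndimage.binary_fill_holes(opened)

A mask cleaned this way would then be scored against the ground truth contours, as described in the Results.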
Thibodeau-Antonacci, Alana; Jafarzadeh, Hossein; Carroll, Liam; Weishaupt, Luca L.
Mitacs Globalink Research Award award
2021.
@award{Thibodeau-Antonacci2021c,
title = {Mitacs Globalink Research Award},
author = {Alana Thibodeau-Antonacci and Hossein Jafarzadeh and Liam Carroll and Luca L. Weishaupt},
url = {https://www.mitacs.ca/en/programs/globalink/globalink-research-award},
year = {2021},
date = {2021-07-01},
urldate = {2021-07-01},
organization = {MITACS},
abstract = {The Mitacs Globalink Research Award (GRA) supports research collaborations between Canada and select partner organizations and eligible countries and regions. It was awarded to Alana Thibodeau-Antonacci, Hossein Jafarzadeh, Liam Carroll and Luca L. Weishaupt.
Under the joint supervision of a home and host professor, successful senior undergraduate students, graduate students, as well as postdoctoral fellows will receive a $6,000 research award to conduct a 12- to 24-week research project in the other country. Awards are offered in partnership with Mitacs’s Canadian academic partners (and, in some cases, with Mitacs’s international partners) and are subject to available funding. },
howpublished = {Mitacs},
keywords = {},
pubstate = {published},
tppubtype = {award}
}
Weishaupt, Luca L.; Thibodeau-Antonacci, Alana; Garant, Aurelie; Singh, Kelita; Miller, Corey; Vuong, Té; Enger, Shirin A.
Inter-Observer Variability and Deep Learning in Rectal Tumor Segmentation from Endoscopy Images Presentation
The COMP Annual Scientific Meeting 2021, 22.06.2021.
@misc{Weishaupt2021c,
title = {Inter-Observer Variability and Deep Learning in Rectal Tumor Segmentation from Endoscopy Images},
author = {Luca L. Weishaupt and Alana Thibodeau-Antonacci and Aurelie Garant and Kelita Singh and Corey Miller and Té Vuong and Shirin A. Enger},
year = {2021},
date = {2021-06-22},
urldate = {2021-06-22},
abstract = {Purpose
To develop an automated rectal tumor segmentation algorithm from endoscopy images.
Material/Methods
A gastrointestinal physician (G1) segmented 2005 endoscopy images into tumor and non-tumor regions. To quantify the inter-observer variability, a second gastrointestinal physician (G2) contoured the images independently.
Three deep-learning architectures used for robust medical image segmentation in previous studies were trained: a fully convolutional neural network (FCN32), a U-Net, and a SegNet.
Since the majority of the images did not contain tumors, two training methods were compared: models trained using only tumor images (M1) and models trained using all images (M2). G1’s images and annotations were divided into 408 training, 82 validation, and 60 testing images for M1, and into 1181 training, 372 validation, and 452 testing images for M2.
Finally, segmentations from G2 and the neural networks' predictions were compared against ground truth labels from G1, and F1 scores were computed for images where both physicians found tumors.
Results
The deep-learning segmentation took less than 1 second per image, while manual segmentation took approximately 10 seconds per image.
The M1 models consistently achieved equal or better scores (SegNet F1: 0.80±0.08) than G2's manual segmentations (F1: 0.68±0.25). G2's low F1/DICE and precision scores indicate poor agreement between the manual contours. The M2 models achieved lower scores than G2 and the M1 models, since they demonstrated a strong bias towards predicting no tumor for all images.
Conclusion
Future studies will investigate training on an equal number of images with and without tumors, using ground truth contours from multiple experts simultaneously.},
howpublished = {The COMP Annual Scientific Meeting 2021},
keywords = {},
pubstate = {published},
tppubtype = {presentation}
}
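The abstract above states that F1 scores were computed only for images in which both physicians found tumors, and reports them as mean ± standard deviation (e.g., SegNet F1: 0.80±0.08). The Python below is a minimal sketch of that scoring protocol under the stated restriction; the function names and the handling of empty masks are assumptions made for illustration.

# A minimal sketch of the scoring protocol stated in the abstract:
# per-image F1 (Dice) overlap, restricted to images where both masks
# contain tumor, summarized as mean and standard deviation.
# Function names are illustrative assumptions, not the study's code.
import numpy as np

def dice_f1(pred: np.ndarray, ref: np.ndarray) -> float:
    """F1 (Dice) overlap between two binary masks."""
    pred, ref = pred.astype(bool), ref.astype(bool)
    intersection = np.logical_and(pred, ref).sum()
    denom = pred.sum() + ref.sum()
    # Treating two empty masks as perfect agreement is a convention
    # assumed here; the filter below makes this case unreachable anyway.
    return 2.0 * intersection / denom if denom else 1.0

def mean_f1_where_both_found(predictions, references):
    """Mean and std of per-image F1 over images where both the
    prediction and the reference mask are non-empty."""
    scores = [dice_f1(p, r) for p, r in zip(predictions, references)
              if p.any() and r.any()]
    return float(np.mean(scores)), float(np.std(scores))

Restricting the average to images where both raters found tumor avoids undefined Dice values on empty masks, which matters here because the majority of the images contained no tumor.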
Morcos, Marc; Antaki, Majd; Thibodeau-Antonacci, Alana; Kalinowski, Jonathan; Glickman, Harry; Enger, Shirin A.
RapidBrachyMCTPS: An open-source dose calculation and optimization tool for brachytherapy research Presentation
COMP, 01.06.2021.
@misc{Morcos2021c,
title = {RapidBrachyMCTPS: An open-source dose calculation and optimization tool for brachytherapy research},
author = {Marc Morcos and Majd Antaki and Alana Thibodeau-Antonacci and Jonathan Kalinowski and Harry Glickman and Shirin A. Enger},
year = {2021},
date = {2021-06-01},
howpublished = {COMP},
keywords = {},
pubstate = {published},
tppubtype = {presentation}
}
Thibodeau-Antonacci, Alana; Vuong, Té; Bekerat, Hamed; Liang, Liheng; Enger, Shirin A.
Development of a Dynamic Shielding Intensity-Modulated Brachytherapy Applicator for the Treatment of Rectal Cancer award
2021.
@award{Thibodeau-Antonacci2021b,
title = {Development of a Dynamic Shielding Intensity-Modulated Brachytherapy Applicator for the Treatment of Rectal Cancer},
author = {Alana Thibodeau-Antonacci and Té Vuong and Hamed Bekerat and Liheng Liang and Shirin A. Enger},
url = {https://curietherapi.es/},
year = {2021},
date = {2021-05-23},
urldate = {2021-05-23},
organization = {Curietherapies},
abstract = {Oral presentation given online at the Annual Congress of Curietherapies (https://curietherapi.es/).},
howpublished = {Annual Congress of Curietherapies},
keywords = {},
pubstate = {published},
tppubtype = {award}
}
Thibodeau-Antonacci, Alana; Vuong, Té; Bekerat, Hamed; Childress, Lilian; Enger, Shirin A.
OC-0112 Development of a dynamic-shielding intensity modulated endorectal brachytherapy applicator Presentation
Radiotherapy and Oncology, 01.05.2021, ISSN: 0167-8140, 1879-0887.
@misc{Thibodeau-Antonacci2021,
title = {OC-0112 Development of a dynamic-shielding intensity modulated endorectal brachytherapy applicator},
author = {Alana Thibodeau-Antonacci and Té Vuong and Hamed Bekerat and Lilian Childress and Shirin A. Enger},
url = {https://www.thegreenjournal.com/article/S0167-8140(21)06316-7/fulltext},
doi = {10.1016/S0167-8140(21)06316-7},
issn = {0167-8140, 1879-0887},
year = {2021},
date = {2021-05-01},
howpublished = {Radiotherapy and Oncology},
keywords = {},
pubstate = {published},
tppubtype = {presentation}
}