@article{riklin2014statistical,
title={Statistical Shape Analysis of Neuroanatomical Structures via Level-Set--based Shape Morphing},
author={Riklin Raviv, Tammy and Gao, Yi and Levitt, James J and Bouix, Sylvain},
journal={SIAM Journal on Imaging Sciences},
volume={7},
number={3},
pages={1645--1668},
year={2014},
publisher={SIAM}
}

Groupwise statistical analysis of the morphometry of brain structures plays an important role in neuroimaging studies. Nevertheless, most morphometric measurements are often limited to volume and surface area, as further morphological characterization of anatomical structures poses a significant challenge. In this paper, we present a method that allows the detection, localization, and quantification of statistically significant morphological differences in complex brain structures between populations. This is accomplished by a novel level-set framework for shape morphing and a multishape dissimilarity measure derived from a modified version of the Hausdorff distance. The proposed method does not require explicit one-to-one point correspondences and is fast, robust, and easy to implement regardless of the topological complexity of the anatomical surface under study. The proposed model has been applied to well-defined regions of interest using both synthetic and real data sets. These include the corpus callosum, striatum, caudate, amygdala-hippocampal complex, and superior temporal gyrus, selected for their importance with respect to brain regions implicated in a variety of neurological disorders. The synthetic databases allowed quantitative evaluations of the method. Results obtained with real clinical data of Williams syndrome and schizophrenia patients agree with published findings in the psychiatry literature.
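To make the dissimilarity measure concrete, here is a minimal Python sketch of a modified Hausdorff distance between two surfaces represented as point clouds, where a mean replaces the classical max over points. The exact variant used in the paper may differ, and the toy circle data is purely illustrative.

```python
import numpy as np
from scipy.spatial.distance import cdist

def modified_hausdorff(a: np.ndarray, b: np.ndarray) -> float:
    """Modified Hausdorff distance between point sets a (n, d) and b (m, d).

    Replaces the max over points in the classical Hausdorff distance with
    a mean, which makes the measure more robust to outliers.
    """
    d = cdist(a, b)                      # pairwise Euclidean distances
    d_ab = d.min(axis=1).mean()          # mean nearest distance, a -> b
    d_ba = d.min(axis=0).mean()          # mean nearest distance, b -> a
    return max(d_ab, d_ba)

# Toy usage: two noisy samplings of the unit circle.
t = np.linspace(0, 2 * np.pi, 200)
shape1 = np.c_[np.cos(t), np.sin(t)]
shape2 = shape1 + 0.01 * np.random.default_rng(0).normal(size=shape1.shape)
print(modified_hausdorff(shape1, shape2))
```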
@article{raviv2012multi,
title={Multi-modal brain tumor segmentation via latent atlases},
author={Riklin Raviv, Tammy and Van Leemput, Koen and Menze, Bjoern H},
journal={Proceedings of MICCAI-BraTS},
volume={64},
year={2012}
}

In this work, a generative approach for patient-specific segmentation of brain tumors across different MR modalities is presented. It is based on the latent atlas approach presented in [7, 8]. The individual segmentation of each scan supports the segmentation of the ensemble by sharing common information. This common information, in the form of a spatial probability map of the tumor location, is inferred concurrently with the evolution of the segmentations. The joint segmentation problem is solved via a statistically driven level-set framework. We illustrate the method on an example application of multimodal and longitudinal brain tumor segmentation, reporting promising segmentation results.
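The shared spatial probability map can be pictured with a small sketch. The following is a simplified alternating scheme, assuming Gaussian intensity likelihoods and a plain voxelwise update rather than the paper's level-set evolution; the initialization and the toy data are illustrative assumptions.

```python
import numpy as np

def joint_segment(scans, n_iters=10):
    """scans: list of aligned intensity volumes of the same patient."""
    # initialize the latent atlas from a crude intensity threshold
    atlas = np.mean([(img > img.mean()).astype(float) for img in scans], axis=0)
    atlas = np.clip(atlas, 0.05, 0.95)
    for _ in range(n_iters):
        posteriors = []
        for img in scans:
            fg = atlas >= 0.5
            fg_mu = img[fg].mean() if fg.any() else img.max()
            bg_mu = img[~fg].mean() if (~fg).any() else img.min()
            lf = np.exp(-0.5 * (img - fg_mu) ** 2)   # foreground likelihood
            lb = np.exp(-0.5 * (img - bg_mu) ** 2)   # background likelihood
            post = lf * atlas / (lf * atlas + lb * (1.0 - atlas) + 1e-12)
            posteriors.append(post)
        atlas = np.clip(np.mean(posteriors, axis=0), 0.0, 1.0)  # shared map
    return posteriors, atlas

# Toy usage: two noisy "modalities" sharing a common bright region.
rng = np.random.default_rng(0)
truth = np.zeros((64, 64)); truth[20:40, 20:40] = 1.0
scans = [truth * 3 + rng.normal(0, 1, truth.shape) for _ in range(2)]
segs, atlas = joint_segment(scans)
```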
@article{ayache2005reviewers,
title={Reviewers--an acknowledgement},
author={Ayache, Nicholas and Duncan, James and Alberola-L{\'o}pez, Carlos and Alexander, Daniel and Arts, Theo and Bansal, Ravi and Bardinet, Eric and Bello, Fernando and Bengtsson, Ewert and Berger, Marie-Odile and others},
journal={Medical Image Analysis},
volume={9},
pages={612--613},
year={2005}
}

@inproceedings{dittrich2012spatio,
title={A spatio-temporal latent atlas for fetal brain segmentation},
author={Dittrich, E and Riklin Raviv, T and Kasprian, G and Brugger, P.C and Prayer, D and Langs, G and others},
year={2012},
organization={European Congress of Radiology-ECR 2012}
}

Ultra-fast Magnetic Resonance Imaging (MRI) enables rich insights into early brain development. However, we currently lack a quantitative reference representing a population's developmental characteristics and variability. Fig. 1 shows a consistently positioned slice in coronal MRIs depicting fetal brains at 20, 25 and 30 gestational weeks (GW). The rapid development the brain undergoes in this period is clearly visible. We propose a spatio-temporal latent atlas capturing the in-utero development of cerebral structures in healthy fetuses from 20 to 30 GW. Additionally, we evaluate differences in developmental speed across a population.
@inproceedings{dittrich2011learning,
title={Learning a spatio-temporal latent atlas for fetal brain segmentation},
author={Dittrich, Eva and Riklin Raviv, Tammy and Kasprian, Gregor and Brugger, Peter C and Prayer, Daniela and Langs, Georg}
}

Fetal Magnetic Resonance Imaging (MRI) in early phases of the cerebral development during gestation offers insights into the emergence of brain structures, their characteristics, and variability across the population. To collect substantial bodies of observations, automatic analysis of these data is necessary. However, automatic segmentation proves challenging due to image quality, low contrast between brain tissues, and the rapid development at this early age. Current atlas-based segmentation approaches perform well in the adult population, but they are unable to cover the rapid changes during early development phases. In this paper, we introduce a spatio-temporal group-wise segmentation of fetal brain structures given a single annotated example. The method is based on an emerging spatio-temporal latent atlas that captures the age-dependent characteristics in the training population and supports the segmentation of brain structures. The proposed atlas makes segmentation of subcortical structures possible by integrating information across a large number of subjects. It encodes the average development and its variability, which is ultimately relevant for diagnosis. Furthermore, we introduce a method to re-estimate each subject's age to accommodate variability in developmental speed. Results on 33 cases from the 20th to 30th gestational week demonstrate improved segmentation results and an estimate of average development.
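One simple way to realize such an age-dependent atlas is kernel regression over the training subjects' label maps, with subject age re-estimated by best atlas overlap. The Gaussian kernel, its width, and the Dice-based age search below are illustrative assumptions, not the paper's exact formulation.

```python
import numpy as np

def atlas_at_age(label_maps, ages, t, sigma=1.0):
    """Age-conditioned probabilistic atlas at gestational age t (weeks).

    label_maps: (n_subjects, ...) stack of aligned binary label maps;
    ages: gestational ages of the training subjects.
    """
    w = np.exp(-0.5 * ((np.asarray(ages, float) - t) / sigma) ** 2)
    w /= w.sum()
    return np.tensordot(w, np.asarray(label_maps, float), axes=1)

def reestimate_age(subject_seg, label_maps, ages, candidates):
    """Pick the candidate age whose atlas best overlaps the segmentation."""
    def dice(a, b):
        return 2.0 * (a * b).sum() / (a.sum() + b.sum() + 1e-12)
    scores = [dice(subject_seg, atlas_at_age(label_maps, ages, t) >= 0.5)
              for t in candidates]
    return candidates[int(np.argmax(scores))]
```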
@inproceedings{riklin2005prior,
title={Prior-based segmentation by projective registration and level sets},
author={Riklin Raviv, Tammy and Kiryati, Nahum and Sochen, Nir},
booktitle={Tenth IEEE International Conference on Computer Vision (ICCV'05) Volume 1},
volume={1},
pages={204--211},
year={2005},
organization={IEEE}
}

Object detection and segmentation can be facilitated by the availability of a reference object. However, accounting for possible transformations between the different object views, as part of the segmentation process, remains a challenge. Recent works address this problem by using comprehensive training data. Other approaches are applicable only to limited object classes or can only accommodate similarity transformations. We suggest a novel variational approach to prior-based segmentation, which accounts for planar projective transformation, using a single reference object. The prior shape is registered concurrently with the segmentation process, without point correspondence. The algorithm detects the object of interest and correctly extracts its boundaries. The homography between the two object views is accurately recovered as well. Extending the Chan-Vese level set framework, we propose a region-based segmentation functional that includes explicit representation of the projective homography between the prior shape and the shape to segment. The formulation is derived from two-view geometry. Segmentation of a variety of objects is demonstrated and the recovered transformation is verified.
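The registration component hinges on warping the prior shape by a planar homography. Below is a minimal sketch of such a warp via inverse mapping with bilinear interpolation; the matrix H is a hypothetical projective transform, and the level-set embedding of the prior is omitted.

```python
import numpy as np
from scipy.ndimage import map_coordinates

def warp_by_homography(mask, H):
    """Apply a 3x3 homography H to a prior mask via inverse mapping."""
    h, w = mask.shape
    ys, xs = np.mgrid[0:h, 0:w]
    pts = np.stack([xs.ravel(), ys.ravel(), np.ones(h * w)])
    src = np.linalg.inv(H) @ pts                 # pull-back coordinates
    src_x, src_y = src[0] / src[2], src[1] / src[2]
    return map_coordinates(mask.astype(float),
                           [src_y.reshape(h, w), src_x.reshape(h, w)],
                           order=1, cval=0.0)    # bilinear, zero outside

# Hypothetical projective transform (near-identity with mild perspective).
H = np.array([[1.05, 0.02, -3.0],
              [0.01, 0.98,  2.0],
              [1e-4, 5e-5,  1.0]])
prior = np.zeros((100, 100)); prior[30:70, 30:70] = 1.0
warped = warp_by_homography(prior, H)
```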
@article{pachter2025serum,
title={Serum Galectin-9 and Decorin in relation to brain aging and the green-Mediterranean diet: A secondary analysis of the DIRECT PLUS randomized trial},
author={Pachter, Dafna and Meir, Anat Y and Kaplan, Alon and Tsaban, Gal and Zelicha, Hila and Rinott, Ehud and Levakov, Gidon and Finkelstein, Ofek and Shelef, Ilan and Salti, Moti and Riklin Raviv, Tammy and others},
journal={Clinical Nutrition},
year={2025},
publisher={Elsevier}
}

Background and aims
We explored whether changes in serum proteomic profiles differed between participants with distinct brain aging trajectories, and whether these changes were influenced by dietary intervention.
Methods
In this secondary analysis of the 18-month DIRECT PLUS trial, 294 participants were randomized to one of three arms: 1) Healthy dietary guidelines (HDG); 2) Mediterranean (MED) diet (+440 mg/day polyphenols from walnuts); or 3) low red/processed meat green-MED diet (+1240 mg/day polyphenols from walnuts, Mankai plant, and green tea). We measured 87 serum proteins (Olink-CVDII). We used magnetic resonance imaging (MRI) 3D T1-weighted brain scans to calculate brain age (by a convolutional neural network) and to identify protein markers reflecting the brain age gap (BAG; the deviation of MRI-assessed brain age from chronological age).
Results
At baseline, lower weight, waist circumference, diastolic blood pressure, and HbA1c parameters were associated with a younger brain age than expected. Specifically, higher levels of two proteins, Galectin-9 (Gal-9) and Decorin (DCN), were associated with accelerated brain aging (larger BAG). A proteomics principal component analysis (PCA) revealed a difference in PC1 between the two time-points for participants with accelerated brain aging. Between baseline and 18 months, Gal-9 significantly decreased among individuals who completed the intervention with attenuated brain aging, while DCN significantly increased among those who completed the trial with accelerated brain aging. A significant interaction was observed between the green-MED diet and proteomics PCA, resulting in a beneficial change compared to the HDG. Participants in the green-MED diet significantly decreased Gal-9 compared to the HDG diet and from baseline.
Conclusions
Higher serum levels of Gal-9 and DCN may indicate an acceleration of brain aging and could be reduced by a green-MED/high-polyphenol (green tea and Mankai) and low-red/processed meat diet.
@article{pachter2024gal,
title={Gal-9 and DCN Serum Expression Reflect Accelerated Brain Aging and Are Attenuated by the Green-Mediterranean Diet: The 18-month DIRECT PLUS Proteomics-Brain MRI Trial},
author={Pachter, Dafna and Yaskolka Meir, Anat and Kaplan, Alon and Tsaban, Gal and Zelicha, Hila and Rinott, Ehud and Levakov, Gidon and Finkelstein, Ofek and Shelef, Ilan and Salti, Moti and Riklin Raviv, Tammy and others},
journal={medRxiv},
pages={2024--11},
year={2024},
publisher={Cold Spring Harbor Laboratory Press}
}

Background
We recently reported that a green-Mediterranean (green-MED), high-polyphenol diet is potentially neuroprotective for age-related brain atrophy. Here, we explored the interplay between dietary intervention, proteomics profile, and accelerated brain age.
Methods
In the 18-month DIRECT PLUS trial, 294 participants (adherence rate=89%) were randomized to one of three arms: 1) Healthy dietary guidelines (HDG); 2) MED diet; or 3) green-MED diet. Both MED diets included 28g/day of walnuts. Additionally, the low red/processed meat green-MED group received daily supplements of polyphenol-rich green tea and green Mankai aquatic plant. In this secondary analysis, we measured 87 serum proteins (Olink-CVDII) and acquired 3D T1-weighted brain MRI scans for brain age calculation by a convolutional neural network, in order to identify protein markers reflecting the brain age gap (BAG: the residual deviation of MRI-assessed brain age from chronological age).
Results
We analyzed eligible brain MRIs (216 at baseline and 18 months) for BAG calculation. At baseline (age=51.3yrs, 90% men), lower weight, waist circumference, diastolic blood pressure, and HbA1c parameters were associated with a younger brain age than expected (p<0.05 for all). At baseline, higher levels of two specific proteins, Galectin-9 (Gal-9) and Decorin (DCN), were associated with a larger BAG (accelerated brain aging; FDR<0.05). A proteomics principal component analysis (PCA) revealed a significant difference between the two time points among participants who completed the trial with accelerated brain aging (p=0.02). Between baseline and 18 months, Gal-9 significantly decreased (p<0.05) among individuals who completed the intervention with attenuated brain aging, and DCN significantly increased (p<0.05) among those who completed the trial with accelerated brain aging. A significant interaction was observed between the green-MED diet and the proteomics PCA change compared to the HDG (β=-1.7; p-interaction=0.05). Participants in the green-MED diet significantly decreased Gal-9 compared to the HDG diet (p=0.015) and from baseline (p=0.003). DCN levels, however, marginally increased in the HDG diet from baseline (p=0.053).
Conclusion
Higher serum levels of Gal-9 and DCN may indicate an acceleration of brain aging and might be reduced by the green-MED/high-polyphenol diet rich in Mankai and green-tea and low in red/processed meat.
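Both abstracts above define the BAG as the residual deviation of MRI-predicted brain age from chronological age. A minimal sketch of that residual computation follows; the predicted ages would come from the trial's CNN, and here they are random stand-ins.

```python
import numpy as np

def brain_age_gap(predicted_age, chronological_age):
    """Residual BAG: positive values indicate an older-looking brain."""
    x = np.asarray(chronological_age, dtype=float)
    y = np.asarray(predicted_age, dtype=float)
    slope, intercept = np.polyfit(x, y, deg=1)   # linear bias correction
    return y - (slope * x + intercept)

# Hypothetical usage with stand-in CNN predictions.
rng = np.random.default_rng(1)
chron = rng.uniform(35, 65, size=100)
pred = chron + rng.normal(0, 3, size=100)
bag = brain_age_gap(pred, chron)
print(bag.mean(), bag.std())
```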
@article{yang2024fine,
title={Fine scale hippocampus morphology variation cross 552 healthy subjects from age 20 to 80},
author={Yang, Qinzhu and Cai, Shuxiu and Chen, Guojing and Yu, Xiaxia and Cattell, Renee F and Riklin Raviv, Tammy and Huang, Chuan and Zhang, Nu and Gao, Yi},
journal={Advanced Machine Learning Approaches for Brain Mapping},
pages={141},
year={2024},
publisher={Frontiers Media SA}
}

The cerebral cortex varies over the course of a person's life span: at birth, the surface is smooth, before becoming more bumpy (deeper sulci and thicker gyri) in middle age, and thinner in senior years. In this work, a similar phenomenon was observed on the hippocampus. It was previously believed that the fine-scale morphology of the hippocampus could be extracted only with high-field scanners (7T, 9.4T); however, recent studies show that regular 3T MR scanners can be sufficient for this purpose. This finding opens the door to the study of fine hippocampal morphology in large amounts of clinical data. In particular, a subtle, characteristic bumpy feature on the inferior aspect of the hippocampus, which we refer to as hippocampal dentation, presents a dramatic degree of variability between individuals, from very smooth to highly dentated. In this report, we propose a combined method joining deep learning and sub-pixel level evolution to efficiently obtain fine-scale hippocampal segmentations of 552 healthy subjects. Through non-linear dentation extraction and fitting, we reveal that the bumpiness of the inferior surface of the human hippocampus has a clear temporal trend: it is bumpiest between 40 and 50 years old. This observation may align with neurodevelopmental and aging stages.
@article{yang2024multidataset,
title={Fine hippocampal morphology analysis with a multi-dataset cross-sectional study on 2911 subjects},
author={Yang, Qinzhu and Chen, Guojing and Yang, Zhi and Riklin Raviv, Tammy and Gao, Yi},
journal={NeuroImage: Clinical},
volume={43},
pages={103620},
year={2024},
publisher={Elsevier}
}

The CA1 subfield and subiculum of the hippocampus contain a series of dentate bulges, also called hippocampal dentation (HD). Several studies have demonstrated an association between HD and brain disorders: for example, the number of hippocampal dentations correlates with temporal lobe epilepsy, and the epileptic hippocampus has fewer dentations than the contralateral hippocampus. However, most studies rely on subjective assessment by manually searching and counting HD areas, which is time-consuming and labor-intensive for large numbers of samples, and to date only one objective method for quantifying HD has been proposed. To fill this gap, we developed an automated and objective method to quantify HD and explore its relationship with neurodegenerative diseases. In this work, we performed a fine-scale morphological characterization of HD in MR T1w images of 2911 subjects from four different cohorts (ADNI, PPMI, HCP, and IXI) to quantify HD and explore differences between the cohorts. The results showed that the degree of right hippocampal dentation is lower in patients with Alzheimer's disease than in subjects with mild cognitive impairment or cognitively normal subjects, whereas this change is not significant in Parkinson's disease progression. The innovation of this paper is a quantitative, robust, and fully automated method; this methodological innovation and the corresponding results constitute the significance and novelty of our study. Moreover, the proposed method overcomes the limitations of manual labeling and is the first to quantitatively measure and compare HD across four different brain populations comprising thousands of subjects. These findings reveal new morphological patterns in hippocampal dentation, which can support subsequent fine-scale hippocampal morphology research.
@article{mavska2023cell,
title={The cell tracking challenge: 10 years of objective benchmarking},
author={Maška, Martin and Ulman, Vladimír and Delgado-Rodriguez, Pablo and Gómez-de-Mariscal, Estibaliz and Nečasová, Tereza and Guerrero Peña, Fidel A. and Ren, Tsang Ing and Meyerowitz, Elliot M. and Scherr, Tim and Riklin Raviv, Tammy and others},
journal={Nature Methods},
volume={20},
number={7},
pages={1010--1020},
year={2023},
publisher={Nature Publishing Group US New York}
}

The Cell Tracking Challenge is an ongoing benchmarking initiative that has become a reference in cell segmentation and tracking algorithm development. Here, we present a significant number of improvements introduced in the challenge since our 2017 report. These include the creation of a new segmentation-only benchmark, the enrichment of the dataset repository with new datasets that increase its diversity and complexity, and the creation of a silver standard reference corpus based on the most competitive results, which will be of particular interest for data-hungry deep learning-based strategies. Furthermore, we present the up-to-date cell segmentation and tracking leaderboards, an in-depth analysis of the relationship between the performance of the state-of-the-art methods and the properties of the datasets and annotations, and two novel, insightful studies about the generalizability and the reusability of top-performing methods. These studies provide critical practical conclusions for both developers and users of traditional and machine learning-based cell segmentation and tracking algorithms.
@article{arbelle2022dual,
title={Dual-task ConvLSTM-UNet for instance segmentation of weakly annotated microscopy videos},
author={Arbelle, Assaf and Cohen, Shaked and Riklin Raviv, Tammy},
journal={IEEE Transactions on Medical Imaging},
volume={41},
number={8},
pages={1948--1960},
year={2022},
publisher={IEEE}
}

Convolutional Neural Networks (CNNs) are considered state-of-the-art segmentation methods for biomedical images in general and microscopy sequences of living cells in particular. The success of the CNNs is attributed to their ability to capture the structural properties of the data, which enables accommodating complex spatial structures of the cells, low contrast, and unclear boundaries. However, in their standard form CNNs do not exploit the temporal information available in time-lapse sequences, which can be crucial to separating touching and partially overlapping cell instances. In this work, we exploit cell dynamics using a novel CNN architecture which allows multi-scale spatio-temporal feature extraction. Specifically, a novel recurrent neural network (RNN) architecture is proposed based on the integration of a Convolutional Long Short Term Memory (ConvLSTM) network with the U-Net. The proposed ConvLSTM-UNet network is constructed as a dual-task network to enable training with weakly annotated data, in the form of approximate cell centers, termed markers, when the complete cells' outlines are not available. We further use the fast marching method to facilitate the partitioning of clustered cells into individual connected components. Finally, we suggest an adaptation of the method for 3D microscopy sequences without drastically increasing the computational load. The method was evaluated on the Cell Segmentation Benchmark and was ranked among the top three methods on six submitted datasets. Exploiting the proposed built-in marker estimator, we also present state-of-the-art cell detection results for an additional, publicly available, weakly annotated dataset.
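A minimal PyTorch sketch of the recurrent convolutional unit such architectures build on is shown below; the dual-task heads, the U-Net integration, and the fast-marching post-processing of the actual network are omitted, and all layer sizes are illustrative.

```python
import torch
import torch.nn as nn

class ConvLSTMCell(nn.Module):
    def __init__(self, in_ch, hid_ch, kernel_size=3):
        super().__init__()
        pad = kernel_size // 2
        # one convolution produces all four gates at once
        self.gates = nn.Conv2d(in_ch + hid_ch, 4 * hid_ch,
                               kernel_size, padding=pad)
        self.hid_ch = hid_ch

    def forward(self, x, state=None):
        b, _, hgt, wid = x.shape
        if state is None:
            h = x.new_zeros(b, self.hid_ch, hgt, wid)
            c = x.new_zeros(b, self.hid_ch, hgt, wid)
        else:
            h, c = state
        i, f, o, g = torch.chunk(self.gates(torch.cat([x, h], dim=1)), 4, dim=1)
        c = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
        h = torch.sigmoid(o) * torch.tanh(c)
        return h, (h, c)

# Run a short frame sequence through the cell.
cell = ConvLSTMCell(in_ch=1, hid_ch=8)
state = None
for frame in torch.randn(5, 2, 1, 64, 64):    # (time, batch, C, H, W)
    out, state = cell(frame, state)
print(out.shape)                               # torch.Size([2, 8, 64, 64])
```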
@article{ben2020role,
title={The role of regularization in shaping weight and node pruning dependency and dynamics},
author={Ben-Guigui, Yael and Goldberger, Jacob and Riklin Raviv, Tammy},
journal={arXiv preprint arXiv:2012.03827},
year={2020}
}

The pressing need to reduce the capacity of deep neural networks has stimulated the development of network dilution methods and their analysis. While the ability of L1 and L0 regularization to encourage sparsity is often mentioned, L2 regularization is seldom discussed in this context. We present a novel framework for weight pruning by sampling from a probability function that favors the zeroing of smaller weights. In addition, we examine the contribution of L1 and L2 regularization to the dynamics of node pruning while optimizing for weight pruning. We then demonstrate the effectiveness of the proposed stochastic framework when used together with a weight-decay regularizer on popular classification models, removing 50% of the nodes in an MLP for MNIST classification and 60% of the filters in VGG-16 for CIFAR10 classification, and on medical image models, removing 60% of the channels in a U-Net for instance segmentation and 50% of the channels in a CNN model for COVID-19 detection. For these node-pruned networks, we also present competitive weight pruning results that are only slightly less accurate than the original, dense networks.
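The sampling idea can be sketched directly: zero a fixed fraction of weights drawn with probability that decays with weight magnitude. The softmax-with-temperature form of that probability is an illustrative assumption, not necessarily the paper's exact choice.

```python
import numpy as np

def sample_pruning_mask(weights, prune_fraction=0.5, temperature=0.05, seed=0):
    """Sample a binary keep-mask; smaller |w| is likelier to be zeroed."""
    rng = np.random.default_rng(seed)
    w = np.abs(weights).ravel()
    logits = -w / temperature                  # small weights -> high logits
    p = np.exp(logits - logits.max())
    p /= p.sum()
    n_prune = int(prune_fraction * w.size)
    pruned = rng.choice(w.size, size=n_prune, replace=False, p=p)
    mask = np.ones(w.size, dtype=bool)
    mask[pruned] = False
    return mask.reshape(weights.shape)

w = np.random.default_rng(1).normal(size=(256, 128))
mask = sample_pruning_mask(w)
print((~mask).mean())                          # ~0.5 of the weights zeroed
```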
@article{veksler2020slow,
title={Slow blood-to-brain transport underlies enduring barrier dysfunction in American football players},
author={Veksler, Ronel and Vazana, Udi and Serlin, Yonatan and Prager, Ofer and Ofer, Jonathan and Shemen, Nofar and Fisher, Andrew M and Minaeva, Olga and Hua, Ning and Saar-Ashkenazy, Rotem and Riklin Raviv, Tammy and others},
journal={Brain},
volume={143},
number={6},
pages={1826--1842},
year={2020},
publisher={Oxford University Press}
}

Repetitive mild traumatic brain injury in American football players has garnered increasing public attention following reports of chronic traumatic encephalopathy, a progressive tauopathy. While the mechanisms underlying repetitive mild traumatic brain injury-induced neurodegeneration are unknown and antemortem diagnostic tests are not available, neuropathology studies suggest a pathogenic role for microvascular injury, specifically blood–brain barrier dysfunction. Thus, our main objective was to demonstrate the effectiveness of a modified dynamic contrast-enhanced MRI approach we have developed to detect impairments in brain microvascular function. To this end, we scanned 42 adult male amateur American football players and a control group comprising 27 athletes practicing a non-contact sport and 26 non-athletes. MRI scans were also performed in 51 patients with brain pathologies involving the blood–brain barrier, namely malignant brain tumours, ischaemic stroke and haemorrhagic traumatic contusion. Based on data from prolonged scans, we generated maps that visualized the permeability value for each brain voxel. Our permeability maps revealed an increase in slow blood-to-brain transport in a subset of amateur American football players, but not in sex- and age-matched controls. The increase in permeability was region specific (white matter, midbrain peduncles, red nucleus, temporal cortex) and correlated with changes in white matter, which were confirmed by diffusion tensor imaging. Additionally, increased permeability persisted for months, as seen in players who were scanned both on- and off-season. Examination of patients with brain pathologies revealed that slow tracer accumulation characterizes areas surrounding the core of injury, which frequently shows fast blood-to-brain transport. Next, we verified our method in two rodent models: rats and mice subjected to repeated mild closed-head impact injury, and rats with vascular injury inflicted by photothrombosis. In both models, slow blood-to-brain transport was observed, which correlated with neuropathological changes. Lastly, computational simulations and direct imaging of the transport of Evans blue-albumin complex in brains of rats subjected to recurrent seizures or focal cerebrovascular injury suggest that increased cellular transport underlies the observed slow blood-to-brain transport. Taken together, our findings suggest dynamic contrast-enhanced-MRI can be used to diagnose specific microvascular pathology after traumatic brain injury and other brain pathologies.
@article{avi2019hue,
title={Hue-net: Intensity-based image-to-image translation with differentiable histogram loss functions},
author={Avi-Aharon, Mor and Arbelle, Assaf and Riklin Raviv, Tammy},
journal={arXiv preprint arXiv:1912.06044},
year={2019}
}

We present the Hue-Net – a novel Deep Learning framework for Intensity-based Image-to-Image Translation. The key idea is a new technique termed network augmentation which allows a differentiable construction of intensity histograms from images. We further introduce differentiable representations of (1D) cyclic and joint (2D) histograms and use them for defining loss functions based on cyclic Earth Mover’s Distance (EMD) and Mutual Information (MI). While the Hue-Net can be applied to several image-to-image translation tasks, we choose to demonstrate its strength on color transfer problems, where the aim is to paint a source image with the colors of a different target image. Note that the desired output image does not exist and therefore cannot be used for supervised pixel-to-pixel learning. This is accomplished by using the HSV color-space and defining an intensity-based loss that is built on the EMD between the cyclic hue histograms of the output and the target images. To enforce color-free similarity between the source and the output images, we define a semantic-based loss by a differentiable approximation of the MI of these images. The incorporation of histogram loss functions in addition to an adversarial loss enables the construction of semantically meaningful and realistic images. Promising results are presented for different datasets.
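A minimal sketch of the two named ingredients, a differentiable (soft) intensity histogram and a 1D EMD via cumulative sums, follows in PyTorch; the cyclic hue variant, the 2D joint histogram for MI, and the kernel width are simplifications and assumptions.

```python
import torch

def soft_histogram(x, n_bins=32, sigma=0.02):
    """x: tensor of intensities in [0, 1]; returns a normalized histogram."""
    centers = torch.linspace(0, 1, n_bins, device=x.device)
    # soft assignment of every pixel to every bin (differentiable in x)
    weights = torch.exp(-0.5 * ((x.reshape(-1, 1) - centers) / sigma) ** 2)
    hist = weights.sum(dim=0)
    return hist / hist.sum()

def emd_1d(h1, h2):
    """EMD between two 1D histograms = L1 distance of their CDFs."""
    return torch.cumsum(h1 - h2, dim=0).abs().sum()

a = torch.rand(64 * 64, requires_grad=True)
b = torch.rand(64 * 64)
loss = emd_1d(soft_histogram(a), soft_histogram(b))
loss.backward()            # gradients flow back to the image a
print(loss.item())
```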
@article{benou2019combining,
title={Combining white matter diffusion and geometry for tract-specific alignment and variability analysis},
author={Benou, Itay and Veksler, Ronel and Friedman, Alon and Riklin Raviv, Tammy},
journal={Neuroimage},
volume={200},
pages={674--689},
year={2019},
publisher={Elsevier}
}

We present a framework for along-tract analysis of white matter (WM) fiber bundles based on diffusion tensor imaging (DTI) and tractography. We introduce the novel concept of fiber-flux density for modeling fiber tracts’ geometry, and combine it with diffusion-based measures to define vector descriptors called Fiber-Flux Diffusion Density (FFDD). The proposed model captures informative features of WM tracts at both the microscopic (diffusion-related) and macroscopic (geometry-related) scales, thus enabling improved sensitivity to subtle structural abnormalities that are not reflected by either diffusion or geometrical properties alone. A key step in this framework is the construction of an FFDD dissimilarity measure for sub-voxel alignment of fiber bundles, based on the fast marching method (FMM). The obtained aligned WM tracts enable meaningful inter-subject comparisons and group-wise statistical analysis. Moreover, we show that the FMM alignment can be generalized in a straightforward manner to a single-shot co-alignment of multiple fiber bundles. The proposed alignment technique is shown to outperform a well-established, commonly used DTI registration algorithm. We demonstrate the FFDD framework on the Human Connectome Project (HCP) diffusion MRI dataset, as well as on two different datasets of contact sports players. We test our method using longitudinal scans of a basketball player diagnosed with a traumatic brain injury, showing compatibility with structural MRI findings. We further perform a group study comparing mid- and post-season scans of 13 active football players exposed to repetitive head trauma, to 17 non-player control (NPC) subjects. Results reveal statistically significant FFDD differences (p-values<0.05) between the groups, as well as increased abnormalities over time at spatially-consistent locations within several major fiber tracts of football players.
@article{gilad2019fully,
title={Fully unsupervised symmetry-based mitosis detection in time-lapse cell microscopy},
author={Gilad, Topaz and Reyes, Jose and Chen, Jia-Yun and Lahav, Galit and Riklin Raviv, Tammy},
journal={Bioinformatics},
volume={35},
number={15},
pages={2644--2653},
year={2019},
publisher={Oxford University Press}
}

Motivation
Cell microscopy datasets have great diversity due to variability in cell types, imaging techniques and protocols. Existing methods are either tailored to specific datasets or are based on supervised learning, which requires comprehensive manual annotations. The latter approach, however, poses a significant difficulty due to the imbalance between the number of mitotic cells and the entire cell population in a time-lapse microscopy sequence.
Results
We present a fully unsupervised framework for both mitosis detection and mother–daughters association in fluorescence microscopy data. The proposed method accommodates the difficulty of the different cell appearances and dynamics. Addressing symmetric cell divisions, a key concept is utilizing daughters’ similarity. Association is accomplished by defining cell neighborhood via a stochastic version of the Delaunay triangulation and optimization by dynamic programming. Our framework presents promising detection results for a variety of fluorescence microscopy datasets of different sources, including 2D and 3D sequences from the Cell Tracking Challenge.
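The neighborhood definition can be illustrated with scipy's Delaunay triangulation over detected cell centroids; the stochastic perturbation of the triangulation and the dynamic-programming association step are omitted, and the random points are toy data.

```python
import numpy as np
from scipy.spatial import Delaunay

def delaunay_neighbors(centroids):
    """centroids: (n, 2) array of cell centers -> dict of neighbor sets."""
    tri = Delaunay(centroids)
    neighbors = {i: set() for i in range(len(centroids))}
    for simplex in tri.simplices:        # each simplex is a triangle
        for u in simplex:
            for v in simplex:
                if u != v:
                    neighbors[int(u)].add(int(v))
    return neighbors

pts = np.random.default_rng(0).uniform(0, 100, size=(20, 2))
print(delaunay_neighbors(pts)[0])        # indices of cell 0's neighbors
```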
@article{arbelle2019qanet,
title={QANet--Quality Assurance Network for Image Segmentation},
author={Arbelle, Assaf and Elul, Eliav and Riklin Raviv, Tammy},
journal={arXiv preprint arXiv:1904.08503},
year={2019}
}

We introduce a novel Deep Learning framework, which quantitatively estimates image segmentation quality without the need for human inspection or labeling. We refer to this method as a Quality Assurance Network — QANet. Specifically, given an image and a 'proposed' corresponding segmentation, obtained by any method including manual annotation, the QANet solves a regression problem in order to estimate a predefined quality measure with respect to the unknown ground truth. The QANet is by no means yet another segmentation method. Instead, it performs a multi-level, multi-feature comparison of an image-segmentation pair based on a unique network architecture, called the RibCage.
To demonstrate the strength of the QANet, we addressed the evaluation of instance segmentation using two different datasets from different domains, namely, high throughput live cell microscopy images from the Cell Segmentation Benchmark and natural images of plants from the Leaf Segmentation Challenge. While synthesized segmentations were used to train the QANet, it was tested on segmentations obtained by publicly available methods that participated in the different challenges. We show that the QANet accurately estimates the scores of the evaluated segmentations with respect to the hidden ground truth, as published by the challenges’ organizers.
@article{arbelle2019qanetmicroscopy,
title={QANet--Quality Assurance Network for Microscopy Cell Segmentation},
author={Arbelle, Assaf and Elul, Eliav and Riklin Raviv, Tammy},
journal={arXiv preprint arXiv:1904.08503},
year={2019}
}

Tools and methods for automatic image segmentation are rapidly developing, each with its own strengths and weaknesses. While these methods are designed to be as general as possible, there are no guarantees for their performance on new data. The choice between methods is usually based on benchmark performance, whereas the data in the benchmark can be significantly different from that of the user. We introduce a novel Deep Learning method which, given an image and a proposed corresponding segmentation, estimates the Intersection over Union measure (IoU) with respect to the unknown ground truth. We refer to this method as a Quality Assurance Network — QANet. The QANet is designed to give the user an estimate of the segmentation quality on the user's own, private data without the need for human inspection or labelling. It is based on the RibCage Network architecture, originally proposed as a discriminator in an adversarial network framework. Promising IoU prediction results are demonstrated based on the Cell Segmentation Benchmark.
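For reference, the quantity the QANet regresses is the standard IoU against a (normally hidden) ground-truth mask; during training, such values computed on masks with known ground truth serve as regression targets. A minimal sketch:

```python
import numpy as np

def iou(proposed: np.ndarray, ground_truth: np.ndarray) -> float:
    """Intersection over Union between two binary masks."""
    p, g = proposed.astype(bool), ground_truth.astype(bool)
    union = np.logical_or(p, g).sum()
    if union == 0:                       # both masks empty: perfect match
        return 1.0
    return float(np.logical_and(p, g).sum() / union)
```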
@article{gordon2018multidimensional,
title={Multidimensional co-segmentation of longitudinal brain mri ensembles in the presence of a neurodegenerative process},
author={Gordon, Shiri and Dolgopyat, Irit and Kahn, Itamar and Riklin Raviv, Tammy},
journal={NeuroImage},
volume={178},
pages={346--369},
year={2018},
publisher={Elsevier}
}

MRI segmentation of a pathological brain poses a significant challenge, as the available anatomical priors that provide top-down information to aid segmentation are inadequate in the presence of abnormalities. This problem is further complicated for longitudinal data capturing impaired brain development or neurodegenerative conditions, since the dynamics of brain atrophy have to be considered as well. For these cases, the absence of compatible annotated training examples renders the commonly used multi-atlas or machine-learning approaches impractical.
We present a novel segmentation approach that accounts for the lack of labeled data via multi-region multi-subject co-segmentation (MMCoSeg) of longitudinal MRI sequences. The underlying, unknown anatomy is learned throughout an iterative process, in which the segmentation of a region is supported both by the segmentation of the neighboring regions, which share common boundaries, and by the segmentation of corresponding regions, in the other jointly segmented images. A 4D multi-region atlas that models the spatio-temporal deformations and can be adapted to different subjects undergoing similar degeneration processes is reconstructed concurrently.
An inducible mouse model of p25 accumulation (the CK-p25 mouse) that displays key pathological hallmarks of Alzheimer disease (AD) is used as a gold standard to test the proposed algorithm by providing conditional control of rapid neurodegeneration. Applying the MMCoSeg to a cohort of CK-p25 mice and littermate controls yields promising segmentation results that demonstrate high compatibility with expert manual annotations. An extensive comparative analysis with respect to current well-established, atlas-based segmentation methods highlights the advantage of the proposed approach, which provides accurate segmentation of longitudinal brain MRIs in pathological conditions, where only very few annotated examples are available.
@article{arbelle2018probabilistic,
title={A probabilistic approach to joint cell tracking and segmentation in high-throughput microscopy videos},
author={Arbelle, Assaf and Reyes, Jose and Chen, Jia-Yun and Lahav, Galit and Riklin Raviv, Tammy},
journal={Medical image analysis},
volume={47},
pages={140--152},
year={2018},
publisher={Elsevier}
}

We present a novel computational framework for the analysis of high-throughput microscopy videos of living cells. The proposed framework is generally useful and can be applied to different datasets acquired in a variety of laboratory settings. This is accomplished by tying together two fundamental aspects of cell lineage construction, namely cell segmentation and tracking, via a Bayesian inference of dynamic models. In contrast to most existing approaches, which aim to be general, no assumption of cell shape is made. Spatial, temporal, and cross-sectional variation of the analysed data are accommodated by two key contributions. First, time series analysis is exploited to estimate the temporal cell shape uncertainty in addition to cell trajectory. Second, a fast marching (FM) algorithm is used to integrate the inferred cell properties with the observed image measurements in order to obtain image likelihood for cell segmentation and association. The proposed approach has been tested on eight different time-lapse microscopy data sets, some of which are high-throughput, demonstrating promising results for the detection, segmentation and association of planar cells. Our results surpass the state of the art for the Fluo-C2DL-MSC data set of the Cell Tracking Challenge (Maška et al., 2014).
@article{arbellebayesian,
title={A Bayesian Approach for Joint Cell Tracking and Segmentation in Microscopy Videos},
author={Arbelle, Assaf and Riklin Raviv, Tammy}
}

We used our novel joint segmentation and tracking algorithm, which is an expansion of the method described in [1]. We ran our method on three data sets, namely Fluo-C2DL-MSC, Fluo-N2DH-GOWT1 and Fluo-N2DH-SIM+. The parameter set for each data set is presented in Table 1.
@incollection{levitt2014connectivity,
title={Connectivity-based Parcellation of the Striatum in Schizophrenia Using Diffusion Weighted Imaging (DWI)},
author={Levitt, James and Rathi, Yogesh and McCarley, Robert and Shenton, Martha and Riklin Raviv, Tammy},
booktitle={Neuropsychopharmacology},
pages={S221--S222},
year={2014}
}

Background: Frontostriatal white matter connectivity may be disrupted in schizophrenia. The striatum and frontal cortex can be divided into limbic (L), associative (DLPFC (A1); VLPFC (A2)) and sensorimotor (SM) functional subregions, which are connected via corticostriatal white matter tracts. We hypothesized fewer connections in schizophrenia, in particular, in limbic and associative frontostriatal pathways.
@article{gao2014shape,
title={Shape analysis, a field in need of careful validation},
author={Gao, Yi and Riklin Raviv, Tammy and Bouix, Sylvain},
journal={Human brain mapping},
volume={35},
number={10},
pages={4965--4978},
year={2014},
publisher={Wiley Online Library}
}

In the last two decades, the statistical analysis of shape has become an actively studied field and finds applications in a wide range of areas. In addition to algorithmic development, many researchers have distributed end-user oriented toolboxes, which further enable the utilization of the algorithms in an “off the shelf” fashion. However, there is little work on the evaluation and validation of these techniques, which poses a rather serious challenge when interpreting their results. To address this lack of validation, we design a validation framework and then use it to test some of the most widely used toolboxes. Our initial results show inconsistencies and disagreement among four different methods. We believe this type of analysis to be critical not only for the community of algorithm designers but also, perhaps more importantly, to researchers who use these tools without knowing the algorithm details and seek objective criteria for tool selection.
@article{wahlby2012image,
title={An image analysis toolbox for high-throughput C. elegans assays},
author={Wählby, Carolina and Kamentsky, Lee and Liu, Zihan H and Riklin Raviv, Tammy and Conery, Annie L and O'Rourke, Eyleen J and Sokolnicki, Katherine L and Visvikis, Orane and Ljosa, Vebjorn and Irazoqui, Javier E and others},
journal={Nature methods},
volume={9},
number={7},
pages={714--716},
year={2012},
publisher={Nature Publishing Group US New York}
}

We present a toolbox for high-throughput screening of image-based Caenorhabditis elegans phenotypes. The image analysis algorithms measure morphological phenotypes in individual worms and are effective for a variety of assays and imaging systems. This WormToolbox is available through the open-source CellProfiler project and enables objective scoring of whole-worm high-throughput image-based assays of C. elegans for the study of diverse biological pathways that are relevant to human disease.
@article{riklin2010segmentation,
title={Segmentation of image ensembles via latent atlases},
author={Riklin Raviv, Tammy and Van Leemput, Koen and Menze, Bjoern H and Wells III, William M and Golland, Polina},
journal={Medical image analysis},
volume={14},
number={5},
pages={654--665},
year={2010},
publisher={Elsevier}
}

Spatial priors, such as probabilistic atlases, play an important role in MRI segmentation. However, the availability of comprehensive, reliable and suitable manual segmentations for atlas construction is limited. We therefore propose a method for joint segmentation of corresponding regions of interest in a collection of aligned images that does not require labeled training data. Instead, a latent atlas, initialized by at most a single manual segmentation, is inferred from the evolving segmentations of the ensemble. The algorithm is based on probabilistic principles but is solved using partial differential equations (PDEs) and energy minimization criteria. We evaluate the method on two datasets, segmenting subcortical and cortical structures in a multi-subject study and extracting brain tumors in a single-subject multi-modal longitudinal experiment. We compare the segmentation results to manual segmentations, when those exist, and to the results of a state-of-the-art atlas-based segmentation method. The quality of the results supports the latent atlas as a promising alternative when existing atlases are not compatible with the images to be segmented.
@inproceedings{raviv2009joint,
title={Joint segmentation via patient-specific latent anatomy model},
author={Riklin Raviv, T and Stieltjes, B and Weber, M.A and Ayache, N and Golland, P},
year={2009}
}

We present a generative approach for joint 3D segmentation of patient-specific MR scans across different modalities or time points. The latent anatomy, in the form of spatial parameters, is inferred simultaneously with the evolution of the segmentations. The individual segmentation of each scan supports the segmentation of the group by sharing common information. The joint segmentation problem is solved via a statistically driven level-set framework. We illustrate the method on an example application of multimodal and longitudinal brain tumor segmentation, reporting promising segmentation results.
@article{riklin2009symmetry,
title={On symmetry, perspectivity, and level-set-based segmentation},
author={Riklin Raviv, Tammy and Sochen, Nir and Kiryati, Nahum},
journal={IEEE transactions on pattern analysis and machine intelligence},
volume={31},
number={8},
pages={1458--1471},
year={2009},
publisher={IEEE}
}

We introduce a novel variational method for the extraction of objects with either bilateral or rotational symmetry in the presence of perspective distortion. Information on the symmetry axis of the object and the distorting transformation is obtained as a by-product of the segmentation process. The key idea is the use of a flip or a rotation of the image to segment as if it were another view of the object. We call this generated image the symmetrical counterpart image. We show that the symmetrical counterpart image and the source image are related by planar projective homography. This homography is determined by the unknown planar projective transformation that distorts the object symmetry. The proposed segmentation method uses a level-set-based curve evolution technique. The extraction of the object boundaries is based on the symmetry constraint and the image data. The symmetrical counterpart of the evolving level-set function provides a dynamic shape prior. It supports the segmentation by resolving possible ambiguities due to noise, clutter, occlusions, and assimilation with the background. The homography that aligns the symmetrical counterpart to the source level-set is recovered via a registration process carried out concurrently with the segmentation. Promising segmentation results of various images of approximately symmetrical objects are shown.
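The symmetrical-counterpart idea can be sketched in a few lines: flip the image (or an evolving segmentation) to obtain the second "view" and score the disagreement. The axis-aligned flip below ignores the projective distortion the paper recovers, so it covers only the degenerate, fronto-parallel case.

```python
import numpy as np

def symmetry_dissimilarity(mask: np.ndarray) -> float:
    """Fraction of pixels on which a segmentation disagrees with its
    horizontally flipped (symmetrical counterpart) version; 0 for a
    perfectly axis-aligned, bilaterally symmetric segmentation."""
    m = mask.astype(bool)
    counterpart = m[:, ::-1]                   # the flipped "second view"
    disagree = np.logical_xor(m, counterpart).sum()
    return disagree / max(m.sum() + counterpart.sum(), 1)
```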
@article{riklin2008shape,
title={Shape-based mutual segmentation},
author={Riklin Raviv, Tammy and Sochen, Nir and Kiryati, Nahum},
journal={International Journal of Computer Vision},
volume={79},
number={3},
pages={231--245},
year={2008},
publisher={Springer}
}

We present a novel variational approach for simultaneous segmentation of two images of the same object taken from different viewpoints. Due to noise, clutter and occlusions, neither of the images contains sufficient information for correct object-background partitioning. The evolving object contour in each image provides a dynamic prior for the segmentation of the other object view. We call this process mutual segmentation. The foundation of the proposed method is a unified level-set framework for region and edge based segmentation, associated with a shape similarity term. The suggested shape term incorporates the semantic knowledge gained in the segmentation process of the image pair, accounting for excess or deficient parts in the estimated object shape. Transformations, including planar projectivities, between the object views are accommodated by a registration process held concurrently with the segmentation. The proposed segmentation algorithm is demonstrated on a variety of image pairs. The homography between each of the image pairs is estimated and its accuracy is evaluated.
@article{riklin2007prior,
title={Prior-based segmentation and shape registration in the presence of perspective distortion},
author={Riklin Raviv, Tammy and Kiryati, Nahum and Sochen, Nir},
journal={International Journal of Computer Vision},
volume={72},
number={3},
pages={309--328},
year={2007},
publisher={Springer}
}

Challenging object detection and segmentation tasks can be facilitated by the availability of a reference object. However, accounting for possible transformations between the different object views, as part of the segmentation process, remains difficult. Recent statistical methods address this problem by using comprehensive training data. Other techniques can only accommodate similarity transformations. We suggest a novel variational approach to prior-based segmentation, using a single reference object, that accounts for planar projective transformation. Generalizing the Chan-Vese level set framework, we introduce a novel shape-similarity measure and embed the projective homography between the prior shape and the image to segment within a region-based segmentation functional. The proposed algorithm detects the object of interest, extracts its boundaries, and concurrently carries out the registration to the prior shape. We demonstrate prior-based segmentation on a variety of images and verify the accuracy of the recovered transformation parameters.
@inproceedings{riklin2004unlevel,
title={Unlevel-sets: Geometry and prior-based segmentation},
author={Riklin Raviv, Tammy and Kiryati, Nahum and Sochen, Nir},
booktitle={European Conference on Computer Vision},
pages={50--61},
year={2004},
organization={Springer}
}

We present a novel variational approach to top-down image segmentation, which accounts for significant projective transformations between a single prior image and the image to be segmented. The proposed segmentation process is coupled with reliable estimation of the transformation parameters, without using point correspondences. The prior shape is represented by a generalized cone that is based on the contour of the reference object. Its unlevel sections correspond to possible instances of the visible contour under perspective distortion and scaling. We extend the Chan-Vese energy functional by adding a shape term. This term measures the distance between the currently estimated section of the generalized cone and the region bounded by the zero-crossing of the evolving level set function. Promising segmentation results are obtained for images of rotated, translated, corrupted and partly occluded objects. The recovered transformation parameters are compatible with the ground truth.
@article{shashua2001quotient,
title={The quotient image: Class-based re-rendering and recognition with varying illuminations},
author={Shashua, Amnon and Riklin Raviv, Tammy},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume={23},
number={2},
pages={129--139},
year={2001},
publisher={IEEE}
}

The paper addresses the problem of “class-based” image-based recognition and rendering with varying illumination. The rendering problem is defined as follows: Given a single input image of an object and a sample of images with varying illumination conditions of other objects of the same general class, re-render the input image to simulate new illumination conditions. The class-based recognition problem is similarly defined: Given a single image of an object in a database of images of other objects, some of them multiply sampled under varying illumination, identify (match) any novel image of that object under varying illumination with the single image of that object in the database. We focus on Lambertian surface classes and, in particular, the class of human faces. The key result in our approach is based on a definition of an illumination invariant signature image which enables an analytic generation of the image space with varying illumination. We show that a small database of objects (in our experiments as few as two objects) is sufficient for generating the image space with varying illumination of any new object of the class from a single input image of that object. In many cases, the recognition results outperform conventional methods by far, and the re-rendering is of remarkable quality considering the size of the database of example images and the mild preprocessing required for making the algorithm work.
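A minimal sketch of the quotient computation follows: the novel image is divided by a best-fit linear combination of mean bootstrap images, one per lighting condition. The plain least-squares fit is a simplification of the paper's energy minimization, and the basis is assumed precomputed.

```python
import numpy as np

def quotient_image(novel, basis):
    """novel: (h, w) image; basis: (k, h, w) mean bootstrap images, one
    per lighting condition. Returns an (approximately) illumination-
    invariant quotient signature of the novel image."""
    A = basis.reshape(len(basis), -1).T            # (pixels, k)
    b = novel.ravel()
    coef, *_ = np.linalg.lstsq(A, b, rcond=None)   # illumination coefficients
    relit = (A @ coef).reshape(novel.shape)        # best class-based relight
    return novel / np.maximum(relit, 1e-6)
```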
@inproceedings{arbelle2019microscopy,
title={Microscopy cell segmentation via convolutional LSTM networks},
author={Arbelle, Assaf and Riklin Raviv, Tammy},
booktitle={2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI 2019)},
pages={1008--1012},
year={2019},
organization={IEEE}
}

Live cell microscopy sequences exhibit complex spatial structures and complicated temporal behaviour, making their analysis a challenging task. Considering the cell segmentation problem, which plays a significant role in the analysis, the spatial properties of the data can be captured using Convolutional Neural Networks (CNNs). Recent approaches show promising segmentation results using convolutional encoder-decoders such as the U-Net. Nevertheless, these methods are limited by their inability to incorporate temporal information, which can facilitate segmentation of individual touching cells or of cells that are partially visible. In order to exploit cell dynamics we propose a novel segmentation architecture which integrates Convolutional Long Short Term Memory (C-LSTM) with the U-Net. The network’s unique architecture allows it to capture multi-scale, compact, spatio-temporal encoding in the C-LSTM’s memory units. The method was evaluated on the Cell Tracking Challenge and achieved state-of-the-art results (1st on Fluo-N2DH-SIM+ and 2nd on DIC-C2DH-HeLa datasets).
@inproceedings{goldberg2018sampling,
title={Sampling technique for defining segmentation error margins with application to structural brain mri},
author={Ben Hamu Goldberg, Heli and Mushkin, Jonathan and Riklin Raviv, Tammy and Sochen, Nir},
booktitle={2018 25th IEEE International Conference on Image Processing (ICIP)},
pages={734--738},
year={2018},
organization={IEEE}
}

Image segmentation is often considered a deterministic process with a single ground truth. Nevertheless, in practice, and in particular when medical imaging analysis is considered, the extraction of regions of interest (ROIs) is ill-posed and the concept of a 'most probable' segmentation is model-dependent. In this paper, a measure for segmentation uncertainty in the form of segmentation error margins is introduced. This measure provides a goodness quantity and allows a 'fully informed' comparison between extracted boundaries of related ROIs as well as more meaningful statistical analysis. The tool we present is based on a novel technique for segmentation sampling in the Fourier domain and Markov Chain Monte Carlo (MCMC). The method was applied to cortical and sub-cortical structure segmentation in MRI. Since the accuracy of segmentation error margins cannot be validated, we use receiver operating characteristic (ROC) curves to support the proposed method. Precision and recall scores with respect to expert annotation suggest this method as a promising tool for a variety of medical imaging applications including user-interactive segmentation, patient follow-up, and cross-sectional analysis.
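The Fourier-domain sampling can be sketched by perturbing a closed contour with random low-frequency Fourier coefficients, which yields smooth boundary samples; the amplitude, the number of modes, and the absence of an MCMC acceptance step are all illustrative simplifications.

```python
import numpy as np

def sample_contour(contour, n_modes=5, amp=2.0, seed=0):
    """contour: (n, 2) closed curve; returns one smoothly perturbed sample."""
    rng = np.random.default_rng(seed)
    n = len(contour)
    coeffs = np.zeros((n, 2), dtype=complex)
    # random complex coefficients on the lowest non-DC frequencies
    coeffs[1:n_modes + 1] = (rng.normal(size=(n_modes, 2))
                             + 1j * rng.normal(size=(n_modes, 2)))
    # keep the real part; amp controls the pixel-scale of the perturbation
    perturb = amp * n * np.fft.ifft(coeffs, axis=0).real / n_modes
    return contour + perturb

t = np.linspace(0, 2 * np.pi, 128, endpoint=False)
circle = 50 * np.c_[np.cos(t), np.sin(t)] + 100
sample = sample_contour(circle)
```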
@inproceedings{gorodissky2018symmetry,
title={Symmetry-based analysis of diffusion MRI for the detection of brain impairments},
author={Gorodissky, O.A. and Sharon, A. and Danov, A. and Riklin Raviv, T.},
booktitle={2018 25th IEEE International Conference on Image Processing (ICIP)},
pages={376--379},
year={2018},
organization={IEEE}
}

We present a novel computational approach to detect white-matter brain impairments following stroke or Traumatic Brain Injuries (TBI). A key assumption in our study is that the two hemispheres are not affected identically. The pathology of white matter (WM) tracts can be thus identified according to the asymmetry level of brain diffusivity measures, such as Fractional Anisotropy (FA) and Mean Diffusivity (MD), extracted from Diffusion Tensor Imaging (DTI). The proposed methodological contribution is based on the construction of a sequence of isosurfaces of these scalar measures and their symmetrical counterparts, obtained by reflecting the original isosurfaces. The modified Hausdorff distance is then used to measure the dissimilarity between each corresponding pair of aligned surfaces. The proposed method is assessed using datasets of normal controls (NCs), stroke patients and longitudinal brain scans of football players that might have been exposed to mild head traumas. Increased asymmetry with respect to NCs is shown for the stroke patients and some of the players indicating possible WM injuries.
@inproceedings{hershkovitch2018model,
title={Model-dependent uncertainty estimation of medical image segmentation},
author={Hershkovitch, Tsachi and Riklin Raviv, Tammy},
booktitle={2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)},
pages={1373--1376},
year={2018},
organization={IEEE}
}

Segmentation is a prevalent research area in medical imaging analysis. Nevertheless, estimation of the uncertainty margins of the extracted anatomical structure or pathology boundaries is seldom considered. This paper studies the concept of segmentation uncertainty of clinical images, acknowledging its great importance to patient follow up, user-interaction guidance, and morphology-based population studies. We propose a novel approach for model-dependent uncertainty estimation for image segmentation. The key contribution is an alternating, iterative algorithm for the generation of an image-specific uncertainty map. This is accomplished by defining a consistency-based measure and applying it to segmentation samples to estimate the uncertainty margins as well as the midline segmentation. We utilize the stochastic active contour framework as our segmentation generator, yet any sampling method can be applied. The method is validated on synthetic data for well-defined objects blurred with known Gaussian kernels. Further assessment of the method is provided by an application of the proposed consistency-based algorithm to ensembles of stochastic segmentations of brain hemorrhage in CT scans.
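A minimal sketch of deriving a midline segmentation and an uncertainty map from an ensemble of segmentation samples follows; the voxelwise agreement frequency stands in for the paper's consistency-based measure, and any sampler could produce the input stack.

```python
import numpy as np

def uncertainty_from_samples(samples):
    """samples: (k, ...) stack of binary segmentation samples."""
    freq = np.mean(samples.astype(float), axis=0)  # voxelwise agreement
    midline = freq >= 0.5                          # majority-vote segmentation
    uncertainty = 1.0 - np.abs(2.0 * freq - 1.0)   # 0 = unanimous, 1 = split
    return midline, uncertainty
```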
@inproceedings{kodner2017atlas,
title={Atlas of classifiers for brain MRI segmentation},
author={Kodner, Boris and Gordon, Shiri and Goldberger, Jacob and Riklin Raviv, Tammy},
booktitle={International Workshop on Machine Learning in Medical Imaging},
pages={36--44},
year={2017},
organization={Springer}
}

We present a conceptually novel framework for brain tissue segmentation based on an Atlas of Classifiers (AoC). The AoC allows a statistical summary of the annotated datasets taking into account both the imaging data and the corresponding labels. It is therefore more informative than the classical probabilistic atlas and more economical than the popular multi-atlas approaches, which require large memory consumption and high computational complexity for each segmentation. Specifically, we consider an AoC as a spatial map of voxel-wise multinomial logistic regression (LR) functions learned from the labeled data. Upon convergence, the resulting fixed LR weights (a few for each voxel) represent the training dataset, which might be huge. Segmentation of a new image is therefore immediate and only requires the calculation of the LR outputs based on the respective voxel-wise features. Moreover, the AoC construction is independent of the test images, providing the flexibility to train it on the available labeled data and use it for the segmentation of images from different datasets and modalities.
The proposed method has been applied to publicly available datasets for the segmentation of brain MRI tissues and is shown to outreach commonly used methods. Promising results were obtained also for multi-modal, cross-modality MRI segmentation.
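To make the inference step concrete, here is a small sketch of voxel-wise multinomial LR evaluation with assumed shapes (V voxels, F features, C classes); the training that produces the per-voxel weights is not shown:

```python
# Sketch: AoC inference. Each voxel of a registered image is classified by its
# own multinomial LR weights; segmentation is one softmax evaluation per voxel.
import numpy as np

def aoc_segment(features, W):
    """features: (V, F) voxel-wise features; W: (V, F, C) per-voxel LR weights."""
    logits = np.einsum('vf,vfc->vc', features, W)   # per-voxel class scores
    logits -= logits.max(axis=1, keepdims=True)     # numerical stability
    probs = np.exp(logits)
    probs /= probs.sum(axis=1, keepdims=True)       # voxel-wise softmax
    return probs.argmax(axis=1)                     # hard label per voxel
```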
@inproceedings{benou2016noising,
title={De-noising of contrast-enhanced MRI sequences by an ensemble of expert deep neural networks},
author={Benou, Ariel and Veksler, Ronel and Friedman, Alon and Riklin Raviv, Tammy},
booktitle={International Workshop on Deep Learning in Medical Image Analysis},
pages={95--110},
year={2016},
organization={Springer}
}
Dynamic contrast-enhanced MRI (DCE-MRI) is an imaging protocol in which MRI scans are acquired repetitively throughout the injection of a contrast agent. The analysis of dynamic scans is widely used for the detection and quantification of blood brain barrier (BBB) permeability. Extraction of the pharmacokinetic (PK) parameters from the DCE-MRI washout curves allows quantitative assessment of the BBB functionality. Nevertheless, the curve fitting required for the analysis of DCE-MRI data is error-prone, as the dynamic scans are subject to non-white, spatially-dependent and anisotropic noise that does not fit standard noise models. The two existing approaches, namely curve smoothing and image de-noising, are limited: the former produces smooth curves but cannot guarantee fidelity to the PK model, while the latter cannot accommodate the high variability of the noise statistics in time and space.
We present a novel framework based on Deep Neural Networks (DNNs) to address the DCE-MRI de-noising challenges. The key idea is an ensemble of expert DNNs, where each expert is trained for different noise characteristics and curve prototypes to solve an inverse problem on a specific subset of the input space. The most likely reconstruction is then chosen using a classifier DNN. As ground-truth (clean) signals for training are not available, a model for generating realistic training sets with complex nonlinear dynamics is presented. The proposed approach has been applied to DCE-MRI scans of stroke and brain tumor patients and is shown to compare favorably to state-of-the-art de-noising methods, without degrading the contrast of the original images.
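A hedged sketch of the ensemble-of-experts inference described above, with all module names as placeholders: each expert proposes a de-noised curve and a classifier ("gate") network selects the most likely reconstruction per input:

```python
# Sketch: ensemble-of-experts de-noising. Every expert DNN proposes a clean
# curve; a classifier DNN ("gate") picks one per input. Placeholder modules.
import torch

def denoise(curves, experts, gate):
    """curves: (B, T) noisy washout curves; experts: list of nn.Module;
    gate: nn.Module mapping (B, T) to (B, len(experts)) selection scores."""
    proposals = torch.stack([e(curves) for e in experts], dim=1)  # (B, E, T)
    choice = gate(curves).argmax(dim=1)                           # (B,)
    idx = choice.view(-1, 1, 1).expand(-1, 1, proposals.size(-1))
    return proposals.gather(1, idx).squeeze(1)                    # (B, T)
```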
@inproceedings{gordon2016co,
title={Co-segmentation of multiple images into multiple regions: Application to mouse brain MRI},
author={Gordon, Shiri and Dolgopyat, Irit and Kahn, Itamar and Riklin Raviv, Tammy},
booktitle={2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI)},
pages={399--402},
year={2016},
organization={IEEE}
}
Challenging biomedical segmentation problems can be addressed by combining top-down information based on the known anatomy with bottom-up models of the image data. Anatomical priors can be provided by probabilistic atlases; nevertheless, in many cases the available atlases are inadequate. We present a novel method for the co-segmentation of multiple images into multiple regions, where only a few annotated examples exist. The underlying, unknown anatomy is learned throughout an interleaved process, in which the segmentation of a region is supported both by the segmentation of neighboring regions that share common boundaries and by the segmentation of corresponding regions in the other jointly segmented images. The method is applied to a mouse brain MRI dataset for the segmentation of five anatomical structures. Experimental results demonstrate the segmentation accuracy achieved despite the complexity of the data.
@inproceedings{arbelle2015analysis,
title={Analysis of high-throughput microscopy videos: Catching up with cell dynamics},
author={Carpenter, A and Riklin Raviv, T},
booktitle={International Conference on Medical Image Computing and Computer-Assisted Intervention},
pages={218--225},
year={2015},
organization={Springer}
}
We present a novel framework for high-throughput cell lineage analysis in time-lapse microscopy images. Our algorithm ties together two fundamental aspects of cell lineage construction, namely cell segmentation and tracking, via a Bayesian inference of dynamic models. The proposed contribution extends the Kalman inference problem by estimating the time-wise cell shape uncertainty in addition to the cell trajectory. These inferred cell properties are combined with the observed image measurements within a fast marching (FM) algorithm, to achieve posterior probabilities for cell segmentation and association. Highly accurate results on two different cell-tracking datasets are presented.
@inproceedings{gilad2015symmetry,
title={Symmetry-based mitosis detection in time-lapse microscopy},
author={Gilad, Topaz and Bray, Mark-Anthony and Carpenter, Anne E and Riklin Raviv, Tammy},
booktitle={2015 IEEE 12th international symposium on biomedical imaging (ISBI)},
pages={164--167},
year={2015},
organization={IEEE}
}
Providing a general framework for mitosis detection is challenging. The variability of the visual traits and temporal features that characterize the event of cell division is huge, due to the numerous cell types, perturbations, imaging techniques and protocols used in microscopy imaging analysis studies. The commonly used machine learning techniques are based on the extraction of comprehensive sets of discriminative features from labeled examples and therefore do not generalize well, as they are restricted to the datasets they were trained on. We present a robust mitotic event detection algorithm that accommodates the variability of cell appearances and dynamics. Addressing symmetrical cell divisions, we consider the anaphase stage, immediately after the DNA material divides, at which the two daughter cells are approximately identical. Having detected pairs of candidate daughter cells, based on their association to potential mother cells, we look for the respective symmetry axes. A mitotic event is detected based on the calculated measure of symmetry of each candidate pair of cells. Promising mitosis detection results for four different time-lapse microscopy datasets were obtained.
@article{menze2014multimodal,
title={The multimodal brain tumor image segmentation benchmark (BRATS)},
author={Menze, Bjoern H and Jakab, Andras and Bauer, Stefan and Riklin Raviv, Tammy and Kalpathy-Cramer, Jayashree and Farahani, Keyvan and Kirby, Justin and Burren, Yuliya and Porz, Nicole and Slotboom, Johannes and Wiest, Roland and others},
journal={IEEE transactions on medical imaging},
volume={34},
number={10},
pages={1993--2024},
year={2014},
publisher={IEEE}
}
In this paper we report the set-up and results of the Multimodal Brain Tumor Image Segmentation Benchmark (BRATS) organized in conjunction with the MICCAI 2012 and 2013 conferences. Twenty state-of-the-art tumor segmentation algorithms were applied to a set of 65 multi-contrast MR scans of low- and high-grade glioma patients—manually annotated by up to four raters—and to 65 comparable scans generated using tumor image simulation software. Quantitative evaluations revealed considerable disagreement between the human raters in segmenting various tumor sub-regions (Dice scores in the range 74%–85%), illustrating the difficulty of this task. We found that different algorithms worked best for different sub-regions (reaching performance comparable to human inter-rater variability), but that no single algorithm ranked in the top for all sub-regions simultaneously. Fusing several good algorithms using a hierarchical majority vote yielded segmentations that consistently ranked above all individual algorithms, indicating remaining opportunities for further methodological improvements. The BRATS image data and manual annotations continue to be publicly available through an online evaluation system as an ongoing benchmarking resource.
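For reference, the two standard quantities the benchmark relies on, Dice overlap and (flat, non-hierarchical) majority-vote fusion, can be sketched as follows; these are textbook definitions rather than code from BRATS itself:

```python
# Sketch: Dice overlap and plain majority-vote label fusion.
import numpy as np

def dice(a, b):
    """a, b: binary masks of equal shape; 2|A∩B| / (|A| + |B|)."""
    return 2.0 * np.logical_and(a, b).sum() / (a.sum() + b.sum())

def majority_vote(masks):
    """masks: iterable of binary segmentations from several algorithms."""
    return (np.stack(masks).mean(axis=0) > 0.5).astype(np.uint8)
```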
@inproceedings{shitrit2014probabilistic,
title={Probabilistic model for 3d interactive segmentation},
author={Shitrit, Ohad and Hershkovich, Tsachi and Shalmon, Tamar and Shelef, Ilan and Riklin Raviv, Tammy}
}
Fully-automated segmentation algorithms offer fast, objective, and reproducible results for large data collections. However, these techniques cannot handle tasks that require contextual knowledge not readily available in the images alone. Thus, the expertise of an experienced physician is necessary.
We present a generative approach to image segmentation, which supports an intuitive and convenient user interaction subject to the bottom-up constraints introduced by the image intensities. The user's "dialogue" with the segmentation algorithm, via several mouse clicks in regions of disagreement, is formulated as an additional, spatial term in a global cost functional for 3D segmentation. The method is exemplified for the segmentation of cerebral hemorrhages (CH) in human brain CT scans.
@inproceedings{raviv2010morphology,
title={Morphology-guided graph search for untangling objects: C. elegans analysis},
author={Riklin Raviv, Tammy},
booktitle={International Conference on Medical Image Computing and Computer-Assisted Intervention},
pages={634--641},
year={2010},
organization={Springer}
}
We present a novel approach for extracting cluttered objects based on their morphological properties. Specifically, we address the problem of untangling Caenorhabditis elegans clusters in high-throughput screening experiments. We represent the skeleton of each worm cluster by a sparse directed graph whose vertices and edges correspond to worm segments and their adjacencies, respectively. We then search for paths in the graph that are most likely to represent worms while minimizing overlap. The worm likelihood measure is defined on a low-dimensional feature space that captures different worm poses, obtained from a training set of isolated worms. We test the algorithm on 236 microscopy images, each containing 15 C. elegans worms, and demonstrate successful cluster untangling and high worm detection accuracy.
@inproceedings{wahlby2010resolving,
title={Resolving clustered worms via probabilistic shape models},
author={Wählby, Carolina and Riklin Raviv, Tammy and Ljosa, Vebjorn and Conery, Annie L and Golland, Polina and Ausubel, Frederick M and Carpenter, Anne E},
booktitle={2010 IEEE International Symposium on Biomedical Imaging: From Nano to Macro},
pages={552--555},
year={2010},
organization={IEEE}
}
The roundworm Caenorhabditis elegans is an effective model system for biological processes such as immunity, behavior, and metabolism. Robotic sample preparation together with automated microscopy and image analysis has recently enabled high-throughput screening experiments using C. elegans. So far, such experiments have been limited to per-image measurements due to the tendency of the worms to cluster, which prevents extracting features from individual animals. We present a novel approach for the extraction of individual C. elegans from clusters of worms in high-throughput microscopy images. The key ideas are the construction of a low-dimensional shape-descriptor space and the definition of a probability measure on it. Promising segmentation results are shown.
@inproceedings{riklin2009joint,
title={Joint segmentation of image ensembles via latent atlases},
author={Riklin Raviv, Tammy and Van Leemput, Koen and Wells III, William M and Golland, Polina},
booktitle={International Conference on Medical Image Computing and Computer-Assisted Intervention},
pages={272--280},
year={2009},
organization={Springer}
}
Spatial priors, such as probabilistic atlases, play an important role in MRI segmentation. However, the availability of comprehensive, reliable and suitable manual segmentations for atlas construction is limited. We therefore propose a joint segmentation of corresponding, aligned structures in the entire population that does not require a probability atlas. Instead, a latent atlas, initialized by a single manual segmentation, is inferred from the evolving segmentations of the ensemble. The proposed method is based on probabilistic principles but is solved using partial differential equations (PDEs) and energy minimization criteria. We evaluate the method by segmenting 50 brain MR volumes. Segmentation accuracy for cortical and subcortical structures approaches the quality of state-of-the-art atlas-based segmentation results, suggesting that the latent atlas method is a reasonable alternative when existing atlases are not compatible with the data to be processed.
@inproceedings{ben2009interactive,
title={Interactive level set segmentation for image-guided therapy},
author={Ben-Zadok, Nir and Riklin Raviv, Tammy and Kiryati, Nahum},
booktitle={2009 IEEE International Symposium on Biomedical Imaging: From Nano to Macro},
pages={1079--1082},
year={2009},
organization={IEEE}
}
Image-guided therapy procedures require the patient to remain still throughout the image acquisition, data analysis and therapy. This imposes a tight time constraint on the overall process. Automatic extraction of the pathological regions prior to the therapy can be faster than the customary manual segmentation performed by the physician. However, the image data alone is usually not sufficient for reliable and unambiguous computerized segmentation. Thus, the oversight of an experienced physician remains mandatory. We present a novel segmentation framework that allows user feedback. A few mouse clicks by the user, discrete in nature, are represented as a continuous energy term that is incorporated into a level-set functional. We demonstrate the proposed method on MR scans of uterine fibroids acquired prior to focused ultrasound ablation treatment. The experiments show that with minimal user input, automatic segmentation results become practically identical to manual expert segmentation.
@inproceedings{kiryati2008real,
title={Real-time abnormal motion detection in surveillance video},
author={Kiryati, Nahum and Riklin Raviv, Tammy and Ivanchenko, Yan and Rochel, Shay},
booktitle={2008 19th International Conference on Pattern Recognition},
pages={1--4},
year={2008},
organization={IEEE}
}
Video surveillance systems produce huge amounts of data for storage and display. Long-term human monitoring of the acquired video is impractical and ineffective. An automatic abnormal-motion detection system that can effectively attract operator attention and trigger recording is therefore key to successful video surveillance in dynamic scenes, such as airport terminals. This paper presents a novel solution for real-time abnormal motion detection. The proposed method is well-suited for modern video-surveillance architectures, where limited computing power is available near the camera for compression and communication. The algorithm uses the macroblock motion vectors that are generated in any case as part of the video compression process. Motion features are derived from the motion vectors. The statistical distribution of these features during normal activity is estimated by training. At the operational stage, improbable motion-feature values indicate abnormal motion. Experimental results demonstrate reliable real-time operation.
@inproceedings{riklin2007propagating,
title={Propagating distributions for segmentation of brain atlas},
author={Sochen, N and Bertand, L and Riklin Raviv, T and Ben-Zadok, N and Kiryati, N and Gefen, S and Nissanov, J},
booktitle={2007 4th IEEE International Symposium on Biomedical Imaging: From Nano to Macro},
pages={1304--1307},
year={2007},
organization={IEEE}
}
We present a novel method for segmentation of anatomical structures in histological data. Segmentation is carried out slice-by-slice, where the successful segmentation of one section provides a prior for the subsequent one. Intensities and spatial locations of the region of interest and the background are modeled by three-dimensional Gaussian mixtures. This information adaptively propagates across the sections. Segmentation is inferred by minimizing a cost functional that enforces the compatibility of the partitions with the corresponding models together with the alignment of the boundaries with the image gradients. The algorithm is demonstrated on histological images of mouse brain. The segmentation results compare well with manual segmentation.
@inproceedings{riklin2006mutual,
title={Mutual segmentation with level sets},
booktitle={2006 Conference on Computer Vision and Pattern Recognition Workshop (CVPRW'06)},
pages={177--177},
year={2006},
organization={IEEE}
}
We suggest a novel variational approach for mutual segmentation of two images of the same object. The images are taken from different views, related by a projective transformation. Either of the two images alone may not provide sufficient information for correct object-background delineation. The emerging segmentation of the object in each view provides a dynamic prior for the segmentation of the other image. The foundation of the proposed method is a unified level-set framework for region- and edge-based segmentation, associated with a shape similarity term. The dissimilarity between the two shape representations accounts for excess or deficient parts and is invariant to planar projective transformation. The suggested algorithm extracts the object in both images, correctly recovers its boundaries, and determines the homography between the two object views.
@inproceedings{riklin2006segmentation,
title={Segmentation by level sets and symmetry},
booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
volume={1},
pages={1015--1022},
year={2006},
organization={IEEE}
}
Shape symmetry is an important cue for image understanding. In the absence of more detailed prior shape information, segmentation can be significantly facilitated by symmetry. However, when symmetry is distorted by perspectivity, the detection of symmetry becomes non-trivial, thus complicating symmetry-aided segmentation. We present an original approach for the segmentation of symmetrical objects that accommodates perspective distortion. The key idea is the use of the replicative form induced by the symmetry for challenging segmentation tasks. This is accomplished by dynamic extraction of the object boundaries, based on the image gradients, gray levels or colors, concurrently with registration of the image's symmetrical counterpart (e.g., its reflection) to itself. The symmetrical counterpart of the evolving object contour supports the segmentation by resolving possible ambiguities due to noise, clutter, distortion, shadows, occlusions and assimilation with the background. The symmetry constraint is integrated in a comprehensive level-set functional for segmentation that determines the evolution of the delineating contour. The proposed framework is exemplified on various images of skew-symmetrical objects, and its superiority over state-of-the-art variational segmentation techniques is demonstrated.
@inproceedings{riklin1999quotient,
title={The quotient image: Class based recognition and synthesis under varying illumination conditions},
booktitle={Proceedings. 1999 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (Cat. No PR00149)},
volume={2},
pages={566--571},
year={1999},
organization={IEEE}
}
The paper addresses the problem of “class-based” recognition and image synthesis under varying illumination. The class-based synthesis and recognition tasks are defined as follows: given a single input image of an object, and a sample of images with varying illumination conditions of other objects of the same general class, capture the equivalence relationship (by generation of new images or by invariants) among all images of the object corresponding to new illumination conditions. The key result in our approach is based on the definition of an illumination-invariant signature image, which we call the “quotient” image, that enables an analytic generation of the image space with varying illumination from a single input image and a very small sample of other objects of the class; in our experiments, as few as two objects. In many cases the recognition results outperform conventional methods by far, and the image synthesis is of remarkable quality considering the size of the database of example images and the mild pre-processing required for making the algorithm work.
@misc{kiryati2014apparatus,
title={Apparatus and methods for the detection of abnormal motion in a video stream},
author={Kiryati, Nahum and Riklin Raviv, Tammy and Ivanchenko, Yan and Rochel, Shay and Dvir, Igal and Harari, Daniel},
year={2014},
month=may # "~13",
publisher={Google Patents},
note={US Patent 8,724,891}
}
An apparatus and method for the detection of abnormal motion in a video stream are provided, comprising a training phase for defining normal motion and a detection phase for detecting abnormal motion in the video stream. Motion is detected according to motion vectors and motion features extracted from video frames.
@book{riklin2007prior,
title={Prior-based Image Segmentation},
author={Riklin Raviv, Tammy},
year={2007},
publisher={Tel Aviv University}
}
Object detection and segmentation can be facilitated by the availability of prior knowledge. This dissertation considers the incorporation of prior shape knowledge within a segmentation framework. The information about the expected shape of the object to extract is obtained from another image of the object. In the first part of the thesis we assume that the object boundaries in the prior image are known. The main challenge is accounting for projective transformations between the different object views. We address it by concurrent segmentation and registration processes. This is accomplished by the construction of a cost functional, where the dynamic variable is the object boundary, represented by the zero level of a level-set function. The functional is optimized using the calculus of variations. An explicit shape prior is not always available. Consider the simultaneous segmentation of two object views. When neither of the images can be correctly segmented based on its edges and gray levels alone, the shape of the region extracted in either of them cannot be used as a reliable prior for the other. We therefore suggest an alternating minimization framework in which the evolving segmentation of each image provides a dynamic prior for the other. We call this process mutual segmentation. When only a single image is given but the imaged object is known to be symmetrical, the symmetry property forms a significant shape constraint and thus can be used to support segmentation. The third part of this thesis deals with the extraction of objects with either bilateral or rotational symmetry in the presence of perspective distortion. The key idea is the use of the symmetrical counterpart image, obtained by a flip or rotation of the source image, as another view of the object. The theoretical foundation of the proposed method is a theorem, proven in this thesis, showing that symmetrical counterpart images are related by a planar projective transformation.
The methods suggested are demonstrated on a variety of images that were taken in the presence of noise, shadows, occlusions or clutter. For each of the examples, accurate extraction of the object boundaries is shown together with the recovery of the planar projective transformation that relates the object views.
Some of the concepts developed are demonstrated on bio-medical applications. We show the delineation of uterine fibroids in MR images. Volumetric segmentation of mouse brain structures from histological data is also presented.
The thesis also addresses the problem of “class-based” image recognition and rendering under varying illumination. The rendering problem is defined as follows: given a single input image of an object, and a sample of images with varying illumination conditions of other objects of the same general class, re-render the input image to simulate new illumination conditions. The class-based recognition problem is similarly defined: given a single image of an object in a database of images of other objects, some of which are sampled multiple times under varying illumination, identify (match) any novel image of that object under varying illumination with the single image of that object in the database. We focus on Lambertian surface classes, and in particular the class of human faces. The key result in our approach is based on the definition of an illumination-invariant signature image which enables an analytic generation of the image space with varying illumination. We show that a small database of objects, in our experiments as few as two objects, is sufficient for generating the image space with varying illumination of any new object of the class from a single input image of that object. In many cases the recognition results outperform conventional methods by far, and the re-rendering is of remarkable quality considering the size of the database of example images and the mild pre-processing required for making the algorithm work.
@article{waizman2025tractotransformer,
title={TractoTransformer: Diffusion MRI Streamline Tractography using CNN and Transformer Networks},
author={Waizman, Itzik and Gusakov, Yakov and Benou, Itay and Riklin Raviv, Tammy},
journal={arXiv preprint arXiv:2509.16429},
year={2025}
}
White matter tractography is an advanced neuroimaging technique that reconstructs the 3D white matter pathways of the brain from diffusion MRI data. It can be framed as a pathfinding problem aiming to infer neural fiber trajectories from noisy and ambiguous measurements, facing challenges such as crossing, merging, and fanning white-matter configurations. In this paper, we propose a novel tractography method that leverages Transformers to model the sequential nature of white matter streamlines, enabling the prediction of fiber directions by integrating both the trajectory context and current diffusion MRI measurements. To incorporate spatial information, we utilize CNNs that extract microstructural features from local neighborhoods around each voxel. By combining these complementary sources of information, our approach improves the precision and completeness of neural pathway mapping compared to traditional tractography models. We evaluate our method with the Tractometer toolkit, achieving competitive performance against state-of-the-art approaches, and present qualitative results on the TractoInferno dataset, demonstrating strong generalization to real-world data.
@article{pachter2025serum,
title={Serum Galectin-9 and Decorin in relation to brain aging and the green-Mediterranean diet: A secondary analysis of the DIRECT PLUS randomized trial},
author={Pachter, Dafna and Meir, Anat Y and Kaplan, Alon and Tsaban, Gal and Zelicha, Hila and Rinott, Ehud and Levakov, Gidon and Finkelstein, Ofek and Shelef, Ilan and Salti, Moti and Riklin Raviv, Tammy and others},
journal={Clinical Nutrition},
year={2025},
publisher={Elsevier}
}
Background and aims
We explored whether changes in serum proteomic profiles differed between participants with distinct brain aging trajectories, and whether these changes were influenced by dietary intervention.
Methods
In this secondary analysis of the 18-month DIRECT PLUS trial, 294 participants were randomized to one of three arms: 1) Healthy dietary guidelines (HDG); 2) Mediterranean (MED) diet (+440 mg/day polyphenols from walnuts); or 3) low red/processed meat green-MED diet (+1240 mg/day polyphenols from walnuts, Mankai plant, and green tea). We measured 87 serum proteins (Olink-CVDII). We used 3D T1-weighted brain Magnetic Resonance Imaging (MRI) scans for brain age calculation (by a convolutional neural network) to identify protein markers reflecting the brain age gap (BAG; the deviation of MRI-assessed brain age from chronological age).
Results
At baseline, lower weight, waist circumference, diastolic blood pressure, and HbA1c parameters were associated with a younger brain age than expected. Specifically, higher levels of two proteins, Galectin-9 (Gal-9) and Decorin (DCN), were associated with accelerated brain aging (larger BAG). A proteomics principal component analysis (PCA) revealed a difference in PC1 between the two time-points for participants with accelerated brain aging. Between baseline and 18 months, Gal-9 significantly decreased among individuals who completed the intervention with attenuated brain aging, while DCN significantly increased among those who completed the trial with accelerated brain aging. A significant interaction was observed between the green-MED diet and proteomics PCA, resulting in a beneficial change compared to the HDG. Participants in the green-MED diet significantly decreased Gal-9 compared to the HDG diet and from baseline.
Conclusions
Higher serum levels of Gal-9 and DCN may indicate an acceleration of brain aging and could be reduced by a green-MED/high-polyphenol (green tea and Mankai) and low-red/processed meat diet.
@article{duenias2025hyperfusion,
title={Hyperfusion: A hypernetwork approach to multimodal integration of tabular and medical imaging data for predictive modeling},
author={Duenias, Daniel and Nichyporuk, Brennan and Arbel, Tal and Riklin Raviv, Tammy},
journal={Medical Image Analysis},
volume={102},
pages={103503},
year={2025},
publisher={Elsevier}
}
The integration of diverse clinical modalities, such as medical imaging and the tabular data extracted from patients' Electronic Health Records (EHRs), is a crucial aspect of modern healthcare. Integrative analysis of multiple sources can provide a comprehensive understanding of a patient's clinical condition, improving diagnosis and treatment decisions. Deep Neural Networks (DNNs) consistently demonstrate outstanding performance in a wide range of multimodal tasks in the medical domain. However, the complex endeavor of effectively merging medical imaging with clinical, demographic and genetic information represented as numerical tabular data remains a highly active and ongoing research pursuit. We present a novel framework based on hypernetworks to fuse clinical imaging and tabular data by conditioning the image processing on the EHR's values and measurements. This approach aims to leverage the complementary information present in these modalities to enhance the accuracy of various medical applications. We demonstrate the strength and generality of our method on two different brain Magnetic Resonance Imaging (MRI) analysis tasks, namely, brain age prediction conditioned by the subject's sex and multi-class Alzheimer's Disease (AD) classification conditioned by tabular data. We show that our framework outperforms both single-modality models and state-of-the-art MRI-tabular data fusion methods.
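A minimal PyTorch sketch of the hypernetwork idea, assuming a single linear layer whose weights are generated from the tabular vector; layer sizes and names below are illustrative and do not reproduce the paper's architecture:

```python
# Sketch: an MLP maps the tabular EHR vector to the weights and bias of one
# linear layer applied to image features, so image processing is conditioned
# on the tabular data. All dimensions are placeholder choices.
import torch
import torch.nn as nn

class HyperLinear(nn.Module):
    def __init__(self, tab_dim, in_dim, out_dim):
        super().__init__()
        self.in_dim, self.out_dim = in_dim, out_dim
        # hypernetwork: tabular vector -> parameters of a linear layer
        self.hyper = nn.Sequential(
            nn.Linear(tab_dim, 64), nn.ReLU(),
            nn.Linear(64, in_dim * out_dim + out_dim),
        )

    def forward(self, img_feat, tab):
        params = self.hyper(tab)                        # (B, in*out + out)
        W = params[:, : self.in_dim * self.out_dim]
        W = W.view(-1, self.out_dim, self.in_dim)       # per-sample weights
        b = params[:, self.in_dim * self.out_dim:]      # per-sample bias
        return torch.bmm(W, img_feat.unsqueeze(-1)).squeeze(-1) + b

# usage: logits = HyperLinear(tab_dim=10, in_dim=128, out_dim=2)(feat, tab)
```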
@article{cohen5137813unsupervised,
title={Unsupervised Bias Field Correction Via Deep Image Decomposition and Symmetry},
author={Cohen, Adar and Riklin Raviv, Tammy},
journal={Available at SSRN 5137813}
}
In Magnetic Resonance Imaging (MRI), a bias field is an artifact that causes gradual changes in image brightness throughout the scan. This artifact is unrelated to the actual anatomy being scanned, complicating automated analysis and making it difficult to differentiate between tissue types or identify regions of interest. Although a variety of bias field correction tools exist, very few utilize deep learning techniques. The main challenge is the lack of clean data that matches the biased scans, rendering supervised methods impractical. Existing semi-supervised approaches often depend on external information that may not always be available. We present a novel unsupervised deep learning framework for bias field correction in 3D brain MRIs. The key idea involves the interplay between two generative networks that decompose the input into a clean image and a bias field. Training is further enhanced by incorporating a unique symmetry-based loss function, alongside adversarial and uniformity losses. Interestingly, the same method can also correct uneven illumination in natural images of portraits, showcasing its versatility. Both tasks involve eliminating unwanted artifacts that share a similar appearance and affect the visibility of underlying structures. We conducted extensive experiments using MRI datasets from healthy adults, brain tumor patients, infants, and mice, as well as portrait images. Our unsupervised framework outperformed traditional and deep learning-based methods, demonstrating its potential for enhancing downstream image analysis applications. The code will be made publicly available.
@article{lekadir2025future,
title={FUTURE-AI: international consensus guideline for trustworthy and deployable artificial intelligence in healthcare},
author={Lekadir, Karim and Frangi, Alejandro F and Porras, Antonio R and Glocker, Ben and Cintas, Celia and Langlotz, Curtis P and Weicken, Eva and Asselbergs, Folkert W and Prior, Fred and Collins, Gary S and Riklin Raviv, Tammy and others},
journal={bmj},
volume={388},
year={2025},
publisher={British Medical Journal Publishing Group}
}
Despite major advances in artificial intelligence (AI) research for healthcare, the deployment and adoption of AI technologies remain limited in clinical practice. This paper describes the FUTURE-AI framework, which provides guidance for the development and deployment of trustworthy AI tools in healthcare. The FUTURE-AI Consortium was founded in 2021 and comprises 117 interdisciplinary experts from 50 countries representing all continents, including AI scientists, clinical researchers, biomedical ethicists, and social scientists. Over a two-year period, the FUTURE-AI guideline was established through consensus based on six guiding principles: fairness, universality, traceability, usability, robustness, and explainability. To operationalise trustworthy AI in healthcare, a set of 30 best practices was defined, addressing technical, clinical, socioethical, and legal dimensions. The recommendations cover the entire lifecycle of healthcare AI, from design, development, and validation to regulation, deployment, and monitoring.
@inproceedings{benou2025show,
title={Show and Tell: Visually Explainable Deep Neural Nets via Spatially-Aware Concept Bottleneck Models},
author={Benou, Itay and Riklin Raviv, Tammy},
booktitle={Proceedings of the Computer Vision and Pattern Recognition Conference},
pages={30063--30072},
year={2025}
}
Modern deep neural networks have now reached human-level performance across a variety of tasks. However, unlike humans, they lack the ability to explain their decisions by showing where and telling what concepts guided them. In this work, we present a unified framework for transforming any vision neural network into a spatially and conceptually interpretable model. We introduce a spatially-aware concept bottleneck layer that projects “black-box” features of pre-trained backbone models into interpretable concept maps, without requiring human labels. By training a classification layer over this bottleneck, we obtain a self-explaining model that articulates which concepts most influenced its prediction, along with heatmaps that ground them in the input image. Accordingly, we name this method the “Spatially-Aware and Label-Free Concept Bottleneck Model” (SALF-CBM). Our results show that the proposed SALF-CBM: (1) outperforms non-spatial CBM methods, as well as the original backbone, on a variety of classification tasks; (2) produces high-quality spatial explanations, outperforming widely used heatmap-based methods on a zero-shot segmentation task; and (3) facilitates model exploration and debugging, enabling users to query specific image regions and refine the model's decisions by locally editing its concept maps.
@article{pachter2024gal,
title={Gal-9 and DCN Serum Expression Reflect Accelerated Brain Aging and Are Attenuated by the Green-Mediterranean Diet: The 18-month DIRECT PLUS Proteomics-Brain MRI Trial},
author={Pachter, Dafna and Yaskolka Meir, Anat and Kaplan, Alon and Tsaban, Gal and Zelicha, Hila and Rinott, Ehud and Levakov, Gidon and Finkelstein, Ofek and Shelef, Ilan and Salti, Moti and Riklin Raviv, Tammy and others},
journal={medRxiv},
pages={2024--11},
year={2024},
publisher={Cold Spring Harbor Laboratory Press}
}
Background
We recently reported that a green-Mediterranean (green-MED), high-polyphenol diet is potentially neuroprotective for age-related brain atrophy. Here, we explored the interplay between dietary intervention, proteomics profile, and accelerated brain age.
Methods
In the 18-month DIRECT PLUS trial, 294 participants (adherence rate = 89%) were randomized to one of three arms: 1) Healthy dietary guidelines (HDG); 2) MED diet; or 3) green-MED diet. Both MED diets included 28 g/day of walnuts. Additionally, the low red/processed meat green-MED group received daily supplements of polyphenol-rich green tea and green Mankai aquatic plant. In this secondary analysis, we measured 87 serum proteins (Olink-CVDII) and acquired 3D T1-weighted brain Magnetic Resonance Imaging (MRI) scans for brain age calculation by a convolutional neural network, in order to identify protein markers reflecting the brain age gap (BAG: the residual deviation of MRI-assessed brain age from chronological age).
Results
We analyzed the eligible brain MRIs (216 at baseline and at 18 months) for BAG calculation. At baseline (age = 51.3 yrs, 90% men), lower weight, waist circumference, diastolic blood pressure, and HbA1c parameters were associated with a younger brain age than expected (p<0.05 for all). At baseline, higher levels of two specific proteins, Galectin-9 (Gal-9) and Decorin (DCN), were associated with a larger BAG (accelerated brain aging; FDR<0.05). A proteomics principal component analysis (PCA) revealed a significant difference between the two time points among participants who completed the trial with accelerated brain aging (p=0.02). Between baseline and 18 months, Gal-9 significantly decreased (p<0.05) among individuals who completed the intervention with attenuated brain aging, and DCN significantly increased (p<0.05) among those who completed the trial with accelerated brain aging. A significant interaction was observed between the green-MED diet and the proteomics PCA change compared to the HDG (β=-1.7; p-interaction=0.05). Participants in the green-MED diet significantly decreased Gal-9 compared to the HDG diet (p=0.015) and from baseline (p=0.003). DCN levels, however, marginally increased in the HDG diet from baseline (p=0.053).
Conclusion
Higher serum levels of Gal-9 and DCN may indicate an acceleration of brain aging and might be reduced by the green-MED/high-polyphenol diet rich in Mankai and green-tea and low in red/processed meat.
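Since the brain age gap (BAG) is central to both DIRECT PLUS abstracts above, here is a small sketch of the residual-based definition given in the Methods; the linear bias-correction regression is a common convention in brain-age studies, assumed here rather than taken from the trial's analysis code:

```python
# Sketch: residual-based brain age gap. A linear fit of predicted on
# chronological age removes the regression-to-the-mean bias of age predictors
# (an assumption; the trial's exact correction may differ).
import numpy as np

def brain_age_gap(predicted_age, chronological_age):
    slope, intercept = np.polyfit(chronological_age, predicted_age, deg=1)
    expected = slope * chronological_age + intercept
    return predicted_age - expected   # > 0: brain looks older than expected
```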
@article{finkelstein2024deep,
title={Deep learning-based BMI inference from structural brain MRI reflects brain alterations following lifestyle intervention},
author={Finkelstein, Ofek and Levakov, Gidon and Kaplan, Alon and Zelicha, Hila and Meir, Anat Yaskolka and Rinott, Ehud and Tsaban, Gal and Witte, Anja Veronica and Blüher, Matthias and Stumvoll, Michael and Riklin Raviv, Tammy and others},
journal={Human Brain Mapping},
volume={45},
number={3},
pages={e26595},
year={2024},
publisher={Wiley Online Library}
}
Obesity is associated with negative effects on the brain. We exploit Artificial Intelligence (AI) tools to explore whether differences in clinical measurements following lifestyle interventions in an overweight population could be reflected in brain morphology. In the DIRECT-PLUS clinical trial, participants meeting the criteria for metabolic syndrome underwent an 18-month lifestyle intervention. Structural brain MRIs were acquired before and after the intervention. We utilized an ensemble learning framework to predict Body-Mass Index (BMI) scores, which correspond to adiposity-related clinical measurements, from brain MRIs. We revealed that the patient-specific reduction in BMI predictions was associated with actual weight loss and was significantly higher in the active diet groups than in a control group. Moreover, explainable AI (XAI) maps highlighted brain regions contributing to BMI predictions that were distinct from regions associated with age prediction. Our DIRECT-PLUS analysis results imply that predicted BMI and its reduction are unique neural biomarkers for obesity-related brain modifications and weight loss.
@article{yang2024fine,
title={Fine hippocampal morphology analysis with a multi-dataset cross-sectional study on 2911 subjects},
author={Yang, Qinzhu and Chen, Guojing and Yang, Zhi and Riklin Raviv, Tammy and Gao, Yi},
journal={NeuroImage: Clinical},
volume={43},
pages={103620},
year={2024},
publisher={Elsevier}
}
The CA1 subfield and subiculum of the hippocampus contain a series of dentate bulges, also called hippocampal dentation (HD). Several studies have demonstrated an association between HD and brain disorders; for example, the number of hippocampal dentations correlates with temporal lobe epilepsy, and epileptic hippocampi have fewer dentations than their contralateral counterparts. However, most studies rely on subjective assessment by manual searching and counting in HD areas, which is time-consuming and labor-intensive for large numbers of samples, and to date only one objective method for quantifying HD has been proposed. To fill this gap, we developed an automated and objective method to quantify HD and explore its relationship with neurodegenerative diseases. In this work, we performed a fine-scale morphological characterization of HD in 2911 subjects from four different cohorts (ADNI, PPMI, HCP, and IXI) to quantify HD in T1-weighted MR images and explore differences between the cohorts. The results showed that the degree of right hippocampal dentation is lower in patients with Alzheimer's disease than in subjects with mild cognitive impairment or cognitively normal subjects, whereas this change is not significant in Parkinson's disease progression. The proposed method is quantitative, robust, and fully automated; it overcomes the limitations of manual labeling and is the first to quantitatively measure and compare HD in four different brain populations comprising thousands of subjects. These findings reveal new morphological patterns in hippocampal dentation, which can support subsequent fine-scale hippocampal morphology research.
@article{avi2023differentiable,
title={Differentiable histogram loss functions for intensity-based image-to-image translation},
author={Avi-Aharon, Mor and Arbelle, Assaf and Riklin Raviv, Tammy},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume={45},
number={10},
pages={11642--11653},
year={2023},
publisher={IEEE}
}
We introduce the HueNet – a novel deep learning framework for a differentiable construction of intensity (1D) and joint (2D) histograms and present its applicability to paired and unpaired image-to-image translation problems. The key idea is an innovative technique for augmenting a generative neural network by histogram layers appended to the image generator. These histogram layers allow us to define two new histogram-based loss functions for constraining the structural appearance of the synthesized output image and its color distribution. Specifically, the color similarity loss is defined by the Earth Mover’s Distance between the intensity histograms of the network output and a color reference image. The structural similarity loss is determined by the mutual information between the output and a content reference image based on their joint histogram. Although the HueNet can be applied to a variety of image-to-image translation problems, we chose to demonstrate its strength on the tasks of color transfer, exemplar-based image colorization, and edges → photo, where the colors of the output image are predefined.
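Two of the ingredients named above can be sketched compactly: a differentiable ("soft") intensity histogram built with Gaussian kernels, and the 1D Earth Mover's Distance computed as the L1 distance between cumulative histograms; the bin count and bandwidth below are arbitrary choices, not the paper's:

```python
# Sketch: soft histogram and 1D EMD loss, under simplifying assumptions.
import torch

def soft_histogram(x, bins=64, sigma=0.02):
    """x: (N,) intensities in [0, 1]; returns a normalized soft histogram."""
    centers = torch.linspace(0.0, 1.0, bins, device=x.device)
    weights = torch.exp(-0.5 * ((x[:, None] - centers[None, :]) / sigma) ** 2)
    hist = weights.sum(dim=0)           # differentiable bin counts
    return hist / hist.sum()

def emd_1d(h1, h2):
    """EMD between two 1D histograms = L1 distance of their CDFs."""
    return (torch.cumsum(h1, 0) - torch.cumsum(h2, 0)).abs().sum()

# color-similarity loss between an output image and a reference:
# loss = emd_1d(soft_histogram(out.flatten()), soft_histogram(ref.flatten()))
```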
@article{ziv2021stochastic,
title={Stochastic weight pruning and the role of regularization in shaping network structure},
author={Ziv, Yael and Goldberger, Jacob and Riklin Raviv, Tammy},
journal={Neurocomputing},
volume={462},
pages={555--567},
year={2021},
publisher={Elsevier}
}
The pressing need to reduce the capacity of deep neural networks has stimulated the development of network dilution methods and their analysis. In this study we present a framework for neural network pruning by sampling from a probability function that favors the zeroing of smaller parameters. This procedure of stochastically setting network weights to zero is done after each parameter updating step in the network learning algorithm. As part of the proposed framework, we examine the contribution of L1 and L2 regularization to the dynamics of pruning larger network structures such as neurons and filters while optimizing for weight pruning. We then demonstrate the effectiveness of the proposed stochastic pruning framework when used together with regularization terms for different network architectures and image analysis tasks. Specifically, we show that using our method we can successfully remove more than 50% of the channels/filters in VGG-16 and MobileNetV2 for CIFAR10 classification; in ResNet56 for CIFAR100 classification; in a U-Net for instance segmentation of biological cells; and in a CNN model tailored for COVID-19 detection. For these filter-pruned networks, we also present competitive weight pruning results while maintaining the accuracy levels of the original, dense networks.
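A hedged sketch of the stochastic pruning step described above: after each optimizer update, weights are zeroed with a probability that favors small magnitudes; the exponential probability function below is one simple choice, not necessarily the one analyzed in the paper:

```python
# Sketch: stochastically zero weights after an update step; the smaller the
# magnitude, the higher the drop probability.
import torch

@torch.no_grad()
def stochastic_prune_(w, temperature=0.05):
    """w: parameter tensor, modified in place after an optimizer step."""
    p_zero = torch.exp(-w.abs() / temperature)   # small |w| -> high drop prob.
    keep = torch.rand_like(w) >= p_zero          # keep with prob. 1 - p_zero
    w.mul_(keep)                                 # zero the sampled weights
```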
@article{gordon2021atlas,
title={An atlas of classifiers—a machine learning paradigm for brain MRI segmentation},
author={Gordon, Shiri and Kodner, Boris and Goldfryd, Tal and Sidorov, Michael and Goldberger, Jacob and Riklin Raviv, Tammy},
journal={Medical \& Biological Engineering \& Computing},
volume={59},
number={9},
pages={1833--1849},
year={2021},
publisher={Springer}
}
We present the Atlas of Classifiers (AoC)—a conceptually novel framework for brain MRI segmentation. The AoC is a spatial map of voxel-wise multinomial logistic regression (LR) functions learned from the labeled data. Upon convergence, the resulting fixed LR weights, a few for each voxel, represent the training dataset. It can, therefore, be considered a light-weight learning machine, which despite its low capacity does not underfit the problem. The AoC construction is independent of the actual intensities of the test images, providing the flexibility to train it on the available labeled data and use it for the segmentation of images from different datasets and modalities. In this sense, it does not overfit the training data either. The proposed method has been applied to numerous publicly available datasets for the segmentation of brain MRI tissues and is shown to be robust to noise and to outperform commonly used methods. Promising results were also obtained for multi-modal, cross-modality MRI segmentation. Finally, we show how an AoC trained on brain MRIs of healthy subjects can be exploited for lesion segmentation of multiple sclerosis patients.
@article{shaul2020subsampled,
title={Subsampled brain MRI reconstruction by generative adversarial neural networks},
author={Shaul, Roy and David, Itamar and Shitrit, Ohad and Riklin Raviv, Tammy},
journal={Medical Image Analysis},
volume={65},
pages={101747},
year={2020},
publisher={Elsevier}
}
A main challenge in magnetic resonance imaging (MRI) is speeding up scan time. Beyond improving patient experience and reducing operational costs, faster scans are essential for time-sensitive imaging, such as fetal, cardiac, or functional MRI, where temporal resolution is important and target movement is unavoidable, yet must be reduced. Current MRI acquisition methods speed up scan time at the expense of lower spatial resolution and costlier hardware. We introduce a practical, software-only framework, based on deep learning, for accelerating MRI acquisition, while maintaining anatomically meaningful imaging. This is accomplished by MRI subsampling followed by estimating the missing k-space samples via generative adversarial neural networks. A generator-discriminator interplay enables the introduction of an adversarial cost in addition to fidelity and image-quality losses used for optimizing the reconstruction.
Promising reconstruction results are obtained from feasible sampling patterns of up to a fivefold acceleration of diverse brain MRIs, from a large publicly available dataset of healthy adult scans as well as multimodal acquisitions of multiple sclerosis patients and dynamic contrast-enhanced MRI (DCE-MRI) sequences of stroke and tumor patients. Clinical usability of the reconstructed MRI scans is assessed by performing either lesion or healthy tissue segmentation and comparing the results to those obtained by using the original, fully sampled images. Reconstruction quality and usability of the DCE-MRI sequences is demonstrated by calculating the pharmacokinetic (PK) parameters. The proposed MRI reconstruction approach is shown to outperform state-of-the-art methods for all datasets tested in terms of the peak signal-to-noise ratio (PSNR), the structural similarity index (SSIM), as well as either the mean squared error (MSE) with respect to the PK parameters, calculated for the fully sampled DCE-MRI sequences, or the segmentation compatibility, measured in terms of Dice scores and Hausdorff distance.
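The degradation that the reconstruction network inverts can be sketched as retrospective k-space subsampling: keep a subset of Fourier coefficients and zero-fill the rest. The binary mask below is a free parameter of the sketch, whereas the paper restricts itself to feasible sampling patterns:

```python
# Sketch: retrospective k-space subsampling of a 2D image.
import numpy as np

def subsample_kspace(img, mask):
    """img: 2D image; mask: binary k-space sampling pattern of the same shape."""
    k = np.fft.fftshift(np.fft.fft2(img))            # centered k-space
    zero_filled = np.fft.ifft2(np.fft.ifftshift(k * mask))
    return np.abs(zero_filled)                       # aliased, zero-filled image
```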
@article{levakov2020deep,
title={From a deep learning model back to the brain—Identifying regional predictors and their relation to aging},
author={Levakov, Gidon and Rosenthal, Gideon and Shelef, Ilan and Riklin Raviv, Tammy and Avidan, Galia},
journal={Human brain mapping},
volume={41},
number={12},
pages={3235--3252},
year={2020},
publisher={Wiley Online Library}
}
We present a Deep Learning framework for the prediction of chronological age from structural magnetic resonance imaging scans. Previous findings associate increased brain age with neurodegenerative diseases and higher mortality rates. However, the importance of brain age prediction goes beyond serving as a biomarker for neurological disorders. Specifically, utilizing convolutional neural network (CNN) analysis to identify brain regions contributing to the prediction can shed light on the complex multivariate process of brain aging. Previous work examined methods to attribute pixel/voxel-wise contributions to the prediction in a single image, resulting in “explanation maps” that were found to be noisy and unreliable. To address this problem, we developed an inference scheme for combining these maps across subjects, thus creating a population-based, rather than a subject-specific, map. We applied this method to a CNN ensemble trained on predicting subjects’ age from raw T1 brain images in a lifespan sample of 10,176 subjects. Evaluating the model on an untouched test set resulted in a mean absolute error of 3.07 years and a correlation between chronological and predicted age of r = 0.98. Using the inference method, we revealed that cavities containing cerebrospinal fluid, previously found to be general atrophy markers, had the highest contribution to age prediction. Comparing maps derived from different models within the ensemble allowed us to assess differences and similarities in the brain regions utilized by the model. We showed that this method substantially increased the replicability of explanation maps, converged with results from voxel-based morphometry age studies and highlighted brain regions whose volumetric variability correlated the most with the prediction error.
@article{levitt2020miswiring,
title={Miswiring of frontostriatal projections in schizophrenia},
author={Levitt, James J and Nestor, Paul G and Kubicki, Marek and Lyall, Amanda E and Zhang, Fan and Riklin Raviv, Tammy and O'Donnell, Lauren J and McCarley, Robert W and Shenton, Martha E and Rathi, Yogesh},
journal={Schizophrenia bulletin},
volume={46},
number={4},
pages={990--998},
year={2020},
publisher={Oxford University Press US}
}
We investigated brain wiring in chronic schizophrenia and healthy controls in frontostriatal circuits using diffusion magnetic resonance imaging tractography in a novel way.
We extracted diffusion streamlines in 27 chronic schizophrenia and 26 healthy controls connecting 4 frontal subregions to the striatum. We labeled the projection zone striatal surface voxels into 2 subtypes: dominant-input from a single cortical subregion, and, functionally integrative, with mixed-input from diverse cortical subregions.
We showed: 1) a group difference for total striatal surface voxel number (P = .045) driven by fewer mixed-input voxels in the left (P = .007), but not right, hemisphere; 2) a group by hemisphere interaction for the ratio quotient between voxel subtypes (P = .04) with a left (P = .006), but not right, hemisphere increase in schizophrenia, also reflecting fewer mixed-input voxels; and 3) fewer mixed-input voxel counts in schizophrenia (P = .045) driven by differences in left hemisphere limbic (P = .007) and associative (P = .01), but not sensorimotor, striatum.
These results demonstrate a less integrative pattern of frontostriatal structural connectivity in chronic schizophrenia. A diminished integrative pattern yields a less complex input pattern to the striatum from the cortex with less circuit integration at the level of the striatum. Further, as brain wiring occurs during early development, aberrant brain wiring could serve as a developmental biomarker for schizophrenia.
@article{avi2020deephist,
title={Deephist: Differentiable joint and color histogram layers for image-to-image translation},
author={Avi-Aharon, Mor and Arbelle, Assaf and Riklin Raviv, Tammy},
journal={arXiv preprint arXiv:2005.03995},
year={2020}
}
We present the DeepHist – a novel Deep Learning framework for augmenting a network by histogram layers, and demonstrate its strength by addressing image-to-image translation problems. Specifically, given an input image and a reference color distribution, we aim to generate an output image with the structural appearance (content) of the input (source) yet with the colors of the reference. The key idea is a new technique for a differentiable construction of joint and color histograms of the output images. We further define a color distribution loss based on the Earth Mover’s Distance between the output’s and the reference’s color histograms, and a Mutual Information loss based on the joint histograms of the source and the output images. Promising results are shown for the tasks of color transfer, image colorization and edges → photo, where the color distribution of the output image is controlled. Comparisons to Pix2Pix and CycleGAN are shown.
@article{avi2019hue,
title={Hue-net: Intensity-based image-to-image translation with differentiable histogram loss functions},
author={Avi-Aharon, Mor and Arbelle, Assaf and Riklin Raviv, Tammy},
journal={arXiv preprint arXiv:1912.06044},
year={2019}
}
We present the Hue-Net – a novel Deep Learning framework for Intensity-based Image-to-Image Translation. The key idea is a new technique termed network augmentation which allows a differentiable construction of intensity histograms from images. We further introduce differentiable representations of (1D) cyclic and joint (2D) histograms and use them for defining loss functions based on cyclic Earth Mover’s Distance (EMD) and Mutual Information (MI). While the Hue-Net can be applied to several image-to-image translation tasks, we choose to demonstrate its strength on color transfer problems, where the aim is to paint a source image with the colors of a different target image. Note that the desired output image does not exist and therefore cannot be used for supervised pixel-to-pixel learning. This is accomplished by using the HSV color-space and defining an intensity-based loss that is built on the EMD between the cyclic hue histograms of the output and the target images. To enforce color-free similarity between the source and the output images, we define a semantic-based loss by a differentiable approximation of the MI of these images. The incorporation of histogram loss functions in addition to an adversarial loss enables the construction of semantically meaningful and realistic images. Promising results are presented for different datasets.
@article{levakov2019deep,
title={From a deep learning model back to the brain-inferring morphological markers and their relation to aging},
author={Levakov, Gidon and Rosenthal, Gideon and Riklin Raviv, Tammy and Shelef, Ilan and Avidan, Galia},
journal={bioRxiv},
pages={803742},
year={2019},
publisher={Cold Spring Harbor Laboratory}
}
We present a Deep Learning framework for the prediction of chronological age from structural MRI scans. Previous findings associate an overestimation of brain age with neurodegenerative diseases and higher mortality rates. However, the importance of brain age prediction goes beyond serving as biomarkers for neurological disorders. Specifically, utilizing convolutional neural network (CNN) analysis to identify brain regions contributing to the prediction can shed light on the complex multivariate process of brain aging. Previous work examined methods to attribute pixel/voxel-wise contributions to the prediction in a single image, resulting in ‘explanation maps’ that were found noisy and unreliable. To address this problem, we developed an inference framework for combining these maps across subjects, thus creating a population-based rather than a subject-specific map. We applied this method to a CNN ensemble trained on predicting subjects’ age from raw T1 brain images of 10,176 subjects. Evaluating the model on an untouched test set resulted in a mean absolute error of 3.07 years and a correlation between chronological and predicted age of r=0.98. Using the inference method, we revealed that cavities containing CSF, previously identified as general atrophy markers, had the highest contribution to age prediction. Comparing maps derived from different models within the ensemble allowed us to assess differences and similarities in the brain regions utilized by the model. We showed that this method substantially increased the replicability of explanation maps, converged with results from voxel-based morphometry age studies and highlighted brain regions whose volumetric variability contributed the most to the prediction.
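The aggregation step can be pictured as follows. This is a simplified stand-in for the paper's inference procedure (the masking rule and the one-sample t-test are our assumptions): spatially normalized per-subject explanation maps are averaged, and voxels whose contribution is not consistently non-zero across subjects are suppressed.

```python
import numpy as np
from scipy import stats

def population_explanation_map(subject_maps, alpha=0.05):
    """subject_maps: (n_subjects, X, Y, Z) spatially normalized explanation
    maps. Returns the mean map, masking voxels whose contribution does not
    differ significantly from zero across subjects (one-sample t-test)."""
    _, p = stats.ttest_1samp(subject_maps, popmean=0.0, axis=0)
    mean_map = subject_maps.mean(axis=0)
    mean_map[p >= alpha] = 0.0
    return mean_map
```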
@article{benou2019combining,
title={Combining white matter diffusion and geometry for tract-specific alignment and variability analysis},
author={Benou, Itay and Veksler, Ronel and Friedman, Alon and Riklin Raviv, Tammy},
journal={Neuroimage},
volume={200},
pages={674--689},
year={2019},
publisher={Elsevier}
}
We present a framework for along-tract analysis of white matter (WM) fiber bundles based on diffusion tensor imaging (DTI) and tractography. We introduce the novel concept of fiber-flux density for modeling fiber tracts’ geometry, and combine it with diffusion-based measures to define vector descriptors called Fiber-Flux Diffusion Density (FFDD). The proposed model captures informative features of WM tracts at both the microscopic (diffusion-related) and macroscopic (geometry-related) scales, thus enabling improved sensitivity to subtle structural abnormalities that are not reflected by either diffusion or geometrical properties alone. A key step in this framework is the construction of an FFDD dissimilarity measure for sub-voxel alignment of fiber bundles, based on the fast marching method (FMM). The obtained aligned WM tracts enable meaningful inter-subject comparisons and group-wise statistical analysis. Moreover, we show that the FMM alignment can be generalized in a straightforward manner to a single-shot co-alignment of multiple fiber bundles. The proposed alignment technique is shown to outperform a well-established, commonly used DTI registration algorithm. We demonstrate the FFDD framework on the Human Connectome Project (HCP) diffusion MRI dataset, as well as on two different datasets of contact sports players. We test our method using longitudinal scans of a basketball player diagnosed with a traumatic brain injury, showing compatibility with structural MRI findings. We further perform a group study comparing mid- and post-season scans of 13 active football players exposed to repetitive head trauma to 17 non-player control (NPC) subjects. Results reveal statistically significant FFDD differences (p-values < 0.05) between the groups, as well as increased abnormalities over time at spatially-consistent locations within several major fiber tracts of football players.
@article{arbelle2019qanet,
title={QANet--Quality Assurance Network for Image Segmentation},
author={Arbelle, Assaf and Elul, Eliav and Riklin Raviv, Tammy},
journal={arXiv preprint arXiv:1904.08503},
year={2019}
}
We introduce a novel Deep Learning framework, which quantitatively estimates image segmentation quality without the need for human inspection or labeling. We refer to this method as a Quality Assurance Network — QANet. Specifically, given an image and a ‘proposed’ corresponding segmentation, obtained by any method including manual annotation, the QANet solves a regression problem in order to estimate a predefined quality measure with respect to the unknown ground truth. The QANet is by no means yet another segmentation method. Instead, it performs a multi-level, multi-feature comparison of an image-segmentation pair based on a unique network architecture, called the RibCage.
To demonstrate the strength of the QANet, we addressed the evaluation of instance segmentation using two different datasets from different domains, namely, high throughput live cell microscopy images from the Cell Segmentation Benchmark and natural images of plants from the Leaf Segmentation Challenge. While synthesized segmentations were used to train the QANet, it was tested on segmentations obtained by publicly available methods that participated in the different challenges. We show that the QANet accurately estimates the scores of the evaluated segmentations with respect to the hidden ground truth, as published by the challenges’ organizers.
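A rough impression of a RibCage-style comparison network, sketched under our own assumptions about layer counts and sizes (the published architecture differs in depth and detail): two side branches ("ribs") encode the image and the segmentation separately, while a central branch ("spine") sees both at every level and feeds a regression head.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class RibCageBlock(nn.Module):
    """One level: two side branches plus a central comparison branch."""
    def __init__(self, in_img, in_seg, in_mid, out_ch):
        super().__init__()
        self.img = nn.Conv2d(in_img, out_ch, 3, stride=2, padding=1)
        self.seg = nn.Conv2d(in_seg, out_ch, 3, stride=2, padding=1)
        self.mid = nn.Conv2d(in_img + in_seg + in_mid, out_ch, 3, stride=2, padding=1)

    def forward(self, x_img, x_seg, x_mid):
        fused = torch.cat([x_img, x_seg, x_mid], dim=1)  # spine sees both ribs
        return (F.relu(self.img(x_img)),
                F.relu(self.seg(x_seg)),
                F.relu(self.mid(fused)))

class QANetSketch(nn.Module):
    def __init__(self):
        super().__init__()
        self.b1 = RibCageBlock(1, 1, 2, 16)   # the spine input starts as image||seg
        self.b2 = RibCageBlock(16, 16, 16, 32)
        self.head = nn.Linear(32, 1)          # regressed quality score

    def forward(self, image, seg):
        i, s, m = self.b1(image, seg, torch.cat([image, seg], dim=1))
        i, s, m = self.b2(i, s, m)
        # global-average-pool the spine features, regress a score in [0, 1]
        return torch.sigmoid(self.head(m.mean(dim=(2, 3))))
```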
@article{ayache2019reviewers,
title={Reviewers--An acknowledgement},
author={Ayache, Nicholas and Duncan, James and Carneiro, Gustavo and Chung, Albert CS and Courtecuisse, Hadrien and Delingette, Herve and Dou, Qi and Duchateau, Nicolas and Dvornek, Nicha and Glocker, Ben and Riklin Raviv, Tammy and others},
journal={Medical Image Analysis},
volume={51},
pages={A1},
year={2019}
}
@article{arbelle2018probabilistic,
title={A probabilistic approach to joint cell tracking and segmentation in high-throughput microscopy videos},
author={Arbelle, Assaf and Reyes, Jose and Chen, Jia-Yun and Lahav, Galit and Riklin Raviv, Tammy},
journal={Medical image analysis},
volume={47},
pages={140--152},
year={2018},
publisher={Elsevier}
}
We present a novel computational framework for the analysis of high-throughput microscopy videos of living cells. The proposed framework is generally useful and can be applied to different datasets acquired in a variety of laboratory settings. This is accomplished by tying together two fundamental aspects of cell lineage construction, namely cell segmentation and tracking, via a Bayesian inference of dynamic models. In contrast to most existing approaches, which aim to be general, no assumption of cell shape is made. Spatial, temporal, and cross-sectional variation of the analysed data are accommodated by two key contributions. First, time series analysis is exploited to estimate the temporal cell shape uncertainty in addition to cell trajectory. Second, a fast marching (FM) algorithm is used to integrate the inferred cell properties with the observed image measurements in order to obtain the image likelihood for cell segmentation and association. The proposed approach has been tested on eight different time-lapse microscopy data sets, some of which are high-throughput, demonstrating promising results for the detection, segmentation and association of planar cells. Our results surpass the state of the art for the Fluo-C2DL-MSC data set of the Cell Tracking Challenge (Maška et al., 2014).
@article{benou2017ensemble,
title={Ensemble of expert deep neural networks for spatio-temporal denoising of contrast-enhanced MRI sequences.},
author={Benou, A. and Veksler, R. and Friedman, A. and Riklin Raviv, T.},
journal={Medical Image Analysis},
volume={42},
pages={145--159},
year={2017}
}
Dynamic contrast-enhanced MRI (DCE-MRI) is an imaging protocol where MRI scans are acquired repetitively throughout the injection of a contrast agent. The analysis of dynamic scans is widely used for the detection and quantification of blood-brain barrier (BBB) permeability. Extraction of the pharmacokinetic (PK) parameters from the DCE-MRI concentration curves allows quantitative assessment of the integrity of the BBB functionality. However, curve fitting required for the analysis of DCE-MRI data is error-prone as the dynamic scans are subject to non-white, spatially-dependent and anisotropic noise. We present a novel spatio-temporal framework based on Deep Neural Networks (DNNs) to address the DCE-MRI denoising challenges. This is accomplished by an ensemble of expert DNNs constructed as deep autoencoders, where each is trained on a specific subset of the input space to accommodate different noise characteristics and curve prototypes. Spatial dependencies of the PK dynamics are captured by incorporating the curves of neighboring voxels in the entire process. The most likely reconstructed curves are then chosen using a classifier DNN followed by a quadratic programming optimization. As clean signals (ground-truth) for training are not available, a fully automatic model for generating realistic training sets with complex nonlinear dynamics is introduced. The proposed approach has been successfully applied to full and even temporally down-sampled DCE-MRI sequences, from two different databases, of stroke and brain tumor patients, and is shown to favorably compare to state-of-the-art denoising methods.
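The expert-routing idea can be summarized in a few lines. This is a sketch only, with the expert networks, the classifier, and the final quadratic-programming refinement assumed to exist elsewhere.

```python
import numpy as np

def denoise_with_experts(curves, experts, classifier):
    """curves: (N, T) noisy DCE-MRI concentration curves.
    experts: list of callables, each mapping (N, T) -> (N, T), each trained
    on a different noise/curve-prototype subset of the input space.
    classifier: callable returning (N, n_experts) plausibility scores.
    Routes each curve to its most plausible expert reconstruction; the
    paper additionally refines this choice via quadratic programming."""
    candidates = np.stack([e(curves) for e in experts], axis=1)  # (N, E, T)
    choice = np.argmax(classifier(curves), axis=1)               # (N,)
    return candidates[np.arange(len(curves)), choice]            # (N, T)
```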
@article{hershkovich2016probabilistic,
title={Probabilistic model for 3D interactive segmentation},
author={Hershkovich, Tsachi and Shalmon, Tamar and Shitrit, Ohad and Halay, Nir and Menze, Bjoern H and Dolgopyat, Irit and Kahn, Itamar and Shelef, Ilan and Riklin Raviv, Tammy},
journal={Computer Vision and Image Understanding},
volume={151},
pages={47--60},
year={2016},
publisher={Elsevier}
}
Fully-automated segmentation algorithms offer fast, objective, and reproducible results for large data collections. However, these techniques cannot handle tasks that require contextual knowledge not readily available in the images alone. Thus, the supervision of an expert is necessary.
We present a generative model for image segmentation, based on Bayesian inference. Not only does our approach support an intuitive and convenient user interaction subject to the bottom-up constraints introduced by the image intensities, it also circumvents the main limitations of a human observer—3D visualization and modality fusion. The user “dialogue” with the segmentation algorithm, via several mouse clicks in regions of disagreement, is formulated as a continuous probability map that represents the user’s certainty as to whether the current segmentation should be modified. Considering this probability map as the voxel-wise Bernoulli prior on the image labels allows spatial encoding of the user-provided input. The method is exemplified for the segmentation of cerebral hemorrhages (CH) in human brain CT scans, ventricles in degenerative mice brain MRIs, and tumors in multi-modal human brain MRIs, and is shown to outperform three interactive, state-of-the-art segmentation methods in terms of accuracy, efficiency and user workload.
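A minimal sketch of how clicks can act as a spatial Bernoulli prior (the Gaussian smoothing and mixing weights are our assumptions, not the paper's exact formulation): user clicks are diffused into a continuous certainty map and combined with the intensity-based foreground likelihood via Bayes' rule.

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def posterior_with_clicks(likelihood_fg, clicks_fg, clicks_bg, sigma=3.0):
    """likelihood_fg: (X, Y, Z) per-voxel foreground probability from the
    intensity model. clicks_fg / clicks_bg: binary volumes of user clicks.
    Smoothed click maps form a continuous certainty map, used as the
    voxel-wise Bernoulli prior on the labels."""
    prior = (0.5 + 0.5 * gaussian_filter(clicks_fg.astype(float), sigma)
                 - 0.5 * gaussian_filter(clicks_bg.astype(float), sigma))
    prior = np.clip(prior, 1e-3, 1 - 1e-3)
    post = prior * likelihood_fg
    return post / (post + (1 - prior) * (1 - likelihood_fg))  # Bayes' rule
```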
@article{menze2015generative,
title={A generative probabilistic model and discriminative extensions for brain lesion segmentation—with application to tumor and stroke},
author={Menze, Bjoern H and Van Leemput, Koen and Lashkari, Danial and Riklin Raviv, Tammy and Geremia, Ezequiel and Alberts, Esther and Gruber, Philipp and Wegener, Susanne and Weber, Marc-Andr{\'e} and Sz{\'e}kely, Gabor and others},
journal={IEEE transactions on medical imaging},
volume={35},
number={4},
pages={933--946},
year={2015},
publisher={IEEE}
}
We introduce a generative probabilistic model for segmentation of brain lesions in multi-dimensional images that generalizes the EM segmenter, a common approach for modelling brain images using Gaussian mixtures and a probabilistic tissue atlas that employs expectation-maximization (EM) to estimate the label map for a new image. Our model augments the probabilistic atlas of the healthy tissues with a latent atlas of the lesion. We derive an estimation algorithm with closed-form EM update equations. The method extracts a latent atlas prior distribution and the lesion posterior distributions jointly from the image data. It delineates lesion areas individually in each channel, allowing for differences in lesion appearance across modalities, an important feature of many brain tumor imaging sequences. We also propose discriminative model extensions to map the output of the generative model to arbitrary labels with semantic and biological meaning, such as “tumor core” or “fluid-filled structure”, but without a one-to-one correspondence to the hypo- or hyper-intense lesion areas identified by the generative model. We test the approach on two image sets: the publicly available BRATS set of glioma patient scans, and multimodal brain images of patients with acute and subacute ischemic stroke. We find that the generative model designed for tumor lesions generalizes well to stroke images, and that the extended discriminative model is one of the top-ranking methods in the BRATS evaluation.
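The flavor of the updates can be conveyed with a toy version that holds the per-channel Gaussian parameters fixed (the actual model also estimates these within the same EM loop): the E-step computes a channel-wise lesion posterior, and the M-step re-estimates the shared latent lesion atlas.

```python
import numpy as np

def em_latent_atlas(imgs, mu_h, sd_h, mu_l, sd_l, n_iter=20):
    """imgs: (C, N) intensities of N registered voxels in C channels.
    (mu_h, sd_h), (mu_l, sd_l): per-channel Gaussian parameters for healthy
    tissue and lesion, held fixed here for brevity. A toy version of the
    paper's closed-form EM updates for the latent lesion atlas alpha."""
    C, N = imgs.shape
    alpha = np.full(N, 0.3)  # latent lesion atlas (prior lesion probability)
    gauss = lambda x, m, s: np.exp(-0.5 * ((x - m) / s) ** 2) / (s * np.sqrt(2 * np.pi))
    for _ in range(n_iter):
        post = np.empty((C, N))
        for c in range(C):  # E-step: per-channel lesion posterior
            pl = alpha * gauss(imgs[c], mu_l[c], sd_l[c])
            ph = (1 - alpha) * gauss(imgs[c], mu_h[c], sd_h[c])
            post[c] = pl / (pl + ph + 1e-12)
        alpha = post.mean(axis=0)  # M-step: update the shared atlas
    return alpha, post
```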
@article{dittrich2014spatio,
title={A spatio-temporal latent atlas for semi-supervised learning of fetal brain segmentations and morphological age estimation},
author={Dittrich, Eva and Riklin Raviv, Tammy and Kasprian, Gregor and Donner, Ren{\'e} and Brugger, Peter C and Prayer, Daniela and Langs, Georg},
journal={Medical image analysis},
volume={18},
number={1},
pages={9--21},
year={2014},
publisher={Elsevier}
}
Prenatal neuroimaging requires reference models that reflect the normal spectrum of fetal brain development, and summarize observations from a representative sample of individuals. Collecting a sufficiently large data set of manually annotated data to construct a comprehensive in vivo atlas of rapidly developing structures is challenging but necessary for large population studies and clinical application. We propose a method for the semi-supervised learning of a spatio-temporal latent atlas of fetal brain development, and corresponding segmentations of emerging cerebral structures, such as the ventricles or cortex. The atlas is based on the annotation of a few examples and a large number of imaging data without annotation. It models the morphological and developmental variability across the population. Furthermore, it serves as a basis for the estimation of a structure’s morphological age, and its deviation from the nominal gestational age during the assessment of pathologies. Experimental results covering the gestational period of 20–30 gestational weeks demonstrate the segmentation accuracy achievable with minimal annotation, and the precision of morphological age estimation. Age estimation results on fetuses suffering from lissencephaly demonstrate that the method detects significant differences in the age offset compared to a control group.
@inproceedings{serebro2025hyda,
title={HyDA: Hypernetworks for Test Time Domain Adaptation in Medical Imaging Analysis},
author={Serebro, Doron and Riklin Raviv, Tammy},
booktitle={International Conference on Medical Image Computing and Computer-Assisted Intervention},
pages={251--261},
year={2025},
organization={Springer}
}
Medical imaging datasets often vary due to differences in acquisition protocols, patient demographics, and imaging devices. These variations in data distribution, known as domain shift, present a significant challenge in adapting imaging analysis models for practical healthcare applications. Most current domain adaptation (DA) approaches aim either to align the distributions between the source and target domains or to learn an invariant feature space that generalizes well across all domains. However, both strategies require access to a sufficient number of examples, though not necessarily annotated, from the test domain during training. This limitation hinders the widespread deployment of models in clinical settings, where target domain data may only be accessible in real time.
In this work, we introduce HyDA, a novel hypernetwork framework that leverages domain-specific characteristics rather than suppressing them, enabling dynamic adaptation at inference time. Specifically, HyDA learns implicit domain representations and uses them to adjust model parameters on-the-fly, allowing effective interpolation to unseen domains. We validate HyDA on two clinically relevant applications—MRI-based brain age prediction and chest X-ray pathology classification—demonstrating its ability to generalize across tasks and imaging modalities.
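In sketch form, a hypernetwork head looks as follows; the names and sizes are illustrative, and the domain embedding is assumed to come from an auxiliary encoder, e.g., pooled features of the incoming scan.

```python
import torch
import torch.nn as nn

class HyperHead(nn.Module):
    """Sketch of the hypernetwork idea: an implicit domain embedding is
    mapped to the weights of a small task head, so the predictor is
    re-parameterized on-the-fly for each (possibly unseen) domain."""
    def __init__(self, emb_dim=64, feat_dim=128, out_dim=1):
        super().__init__()
        self.feat_dim, self.out_dim = feat_dim, out_dim
        # The hypernetwork emits a weight matrix and a bias per sample.
        self.hyper = nn.Linear(emb_dim, feat_dim * out_dim + out_dim)

    def forward(self, features, domain_emb):
        # features: (B, feat_dim); domain_emb: (B, emb_dim)
        params = self.hyper(domain_emb)
        W = params[:, :self.feat_dim * self.out_dim].view(-1, self.out_dim, self.feat_dim)
        b = params[:, self.feat_dim * self.out_dim:]
        # Apply the per-sample, domain-conditioned linear head.
        return torch.bmm(W, features.unsqueeze(-1)).squeeze(-1) + b
```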
@inproceedings{shwartzman2024worrisome,
title={The worrisome impact of an inter-rater bias on neural network training},
author={Shwartzman, Or and Gazit, Harel and Ben-Aryeh, Gal and Shelef, Ilan and Riklin Raviv, Tammy},
booktitle={International Conference on Medical Imaging and Computer-Aided Diagnosis},
pages={463--473},
year={2024},
organization={Springer}
}
Inter-rater bias in medical image segmentation is often overlooked when automatic models, machine learning included, are considered. However, now that deep neural networks (DNNs) have become prevalent computational tools, and since most training processes are supervised to some extent, its influence on prediction quality and reliability should be examined. In this study we employed a commonly-used supervised segmentation framework to quantify the influence of the training-set annotator on the inferred segmentation masks. We found that the inter-rater bias was amplified and became more consistent when the DNNs’ predicted segmentations, rather than the manual annotations themselves, were compared. Specifically, we used two different datasets: brain MRIs of Multiple Sclerosis (MS) patients that were annotated by two raters with different levels of expertise; and Intracerebral Hemorrhage (ICH) CT scans with manual and semi-manual segmentations. The results suggest a worrisome clinical implication of a DNN bias induced by an inter-rater bias during training. Specifically, we found a consistent underestimation of MS-lesion loads when calculated from segmentation predictions of a DNN trained on segmentation masks provided by the less experienced rater. In the same manner, the differences in ICH volumes calculated from the outputs of identical DNNs, each trained on annotations from a different source, were more consistent and larger than the differences in volumes between the manual and semi-manual annotations used for training.
@inproceedings{buchnik2023generating,
title={Generating Artistic Images Via Few-Shot Style Transfer},
author={Buchnik, Itay and Berebi, Or and Riklin Raviv, Tammy and Shlezinger, Nir},
booktitle={2023 IEEE International Conference on Acoustics, Speech, and Signal Processing Workshops (ICASSPW)},
pages={1--5},
year={2023},
organization={IEEE}
}
Generating images from a predefined style with heterogeneous and limited data is a challenging task for generative models. This work focuses on the conditional generation of artistic images, aiming to learn, from a small set of paintings with high variability, how to convert real-world photos into impressionistic paintings with the same given style. We design a few-shot style transfer model using a mixture of diverse one-shot style transfer generative models based on the SinGAN model. The proposed few-shot model, coined EnSinGAN, utilizes an ensemble of different SinGAN realizations to transfer realistic photos to their closest painting style, by incorporating a novel aggregation mechanism based on the minimum cosine distance in the latent space of the feature vectors. EnSinGAN generates convincing impressionistic landscape images, and was awarded first place in the Kaggle competition “I’m something of a painter myself” by being the closest in distribution to the test images.
@inproceedings{ben2022graph,
title={Graph neural network for cell tracking in microscopy videos},
author={Ben-Haim, Tal and Riklin Raviv, Tammy},
booktitle={European Conference on Computer Vision},
pages={610--626},
year={2022},
organization={Springer}
}
We present a novel graph neural network (GNN) approach for cell tracking in high-throughput microscopy videos. By modeling the entire time-lapse sequence as a directed graph, where cell instances are represented by its nodes and their associations by its edges, we extract the entire set of cell trajectories by looking for the maximal paths in the graph. This is accomplished by several key contributions incorporated into an end-to-end deep learning framework. We exploit a deep metric learning algorithm to extract cell feature vectors that distinguish between instances of different biological cells and group together instances of the same cell. We introduce a new GNN block type which enables a mutual update of node and edge feature vectors, thus facilitating the underlying message passing process. The message passing concept, whose extent is determined by the number of GNN blocks, is of fundamental importance as it enables the ‘flow’ of information between nodes and edges far beyond their neighbors in consecutive frames. Finally, we solve an edge classification problem and use the identified active edges to construct the cells’ tracks and lineage trees.
We demonstrate the strengths of the proposed cell tracking approach by applying it to 2D and 3D datasets of different cell types, imaging setups, and experimental conditions. We show that our framework outperforms current state-of-the-art methods on most of the evaluated datasets.
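The mutual node-edge update can be illustrated with a small block (the MLPs and sizes are our assumptions, not the published design): edges are refreshed from their endpoint nodes, then each node aggregates its incoming edge messages; stacking several such blocks extends the reach of message passing across frames.

```python
import torch
import torch.nn as nn

class NodeEdgeBlock(nn.Module):
    """One mutual-update step: edge features are updated from their
    endpoint nodes, then nodes aggregate incoming-edge messages."""
    def __init__(self, dn, de):
        super().__init__()
        self.edge_mlp = nn.Sequential(nn.Linear(2 * dn + de, de), nn.ReLU())
        self.node_mlp = nn.Sequential(nn.Linear(dn + de, dn), nn.ReLU())

    def forward(self, x, e, src, dst):
        # x: (N, dn) node features (cell instances); e: (E, de) edge features
        # (candidate associations); src, dst: (E,) endpoint indices.
        e = self.edge_mlp(torch.cat([x[src], x[dst], e], dim=1))
        agg = torch.zeros(x.size(0), e.size(1), device=x.device)
        agg.index_add_(0, dst, e)  # sum messages of incoming edges per node
        x = self.node_mlp(torch.cat([x, agg], dim=1))
        return x, e
```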
@inproceedings{ben2022deep,
title={A deep ensemble learning approach to lung CT segmentation for COVID-19 severity assessment},
author={Ben-Haim, Tal and Sofer, Ron Moshe and Ben-Arie, Gal and Shelef, Ilan and Riklin Raviv, Tammy},
booktitle={2022 IEEE International Conference on Image Processing (ICIP)},
pages={151--155},
year={2022},
organization={IEEE}
}
We present a novel deep learning approach to categorical segmentation of lung CTs of COVID-19 patients. Specifically, we partition the scans into healthy lung tissues, non-lung regions, and two different, yet visually similar, pathological lung tissues, namely, ground-glass opacity and consolidation. This is accomplished via a unique, end-to-end hierarchical network architecture and ensemble learning, which contribute to the segmentation and provide a measure of segmentation uncertainty. The proposed framework achieves competitive results and outstanding generalization capabilities on three COVID-19 datasets. Our method is ranked second in a public Kaggle competition for COVID-19 CT image segmentation. Moreover, segmentation uncertainty regions are shown to correspond to the disagreements between the manual annotations of two different radiologists. Finally, preliminary promising correspondence results are shown for our private dataset when comparing the patients’ COVID-19 severity scores (based on clinical measures) with the segmented lung pathologies. Code and data are available at our repository.
@inproceedings{goldfryd2021deep,
title={Deep semi-supervised bias field correction of MR images},
author={Goldfryd, Tal and Gordon, Shiri and Riklin Raviv, Tammy},
booktitle={2021 IEEE 18th international symposium on biomedical imaging (ISBI)},
pages={1836--1840},
year={2021},
organization={IEEE}
}
A bias field is an artifact inherent to MRI scanners, manifested as a smooth intensity variation across the scans. We present an innovative generative approach to address the inverse problem of bias field estimation and removal in a semi-supervised manner. The key contribution is the construction of a compound framework of four interacting, adversarial neural networks. Specifically, we simultaneously train a pair of neural networks, one for the reconstruction of the plain bias field and the other for the reconstruction of a bias-free MRI scan, such that the output of each, together with the input biased scans, defines the loss of the other network. A third network, trained as a bias-field discriminator, provides an additional loss to the bias field generator, while an MRI segmentation network provides an additional loss to the bias-free MRI generator. We trained and validated our framework using real MRI scans with simulated bias fields and tested it on publicly available brain datasets as well as private data, yielding results competitive with state-of-the-art methods. Code is available upon request.
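The coupling between the two generators follows from the standard multiplicative bias model, observed = clean x bias. A minimal sketch of the shared reconstruction term, with the two generator networks assumed defined elsewhere and the adversarial and segmentation losses omitted:

```python
import torch

def coupled_reconstruction_loss(biased, bias_net, clean_net):
    """Each generator's output, multiplied by the other's, must reproduce
    the observed biased scan; this ties the two networks together."""
    bias = bias_net(biased)    # smooth multiplicative bias field
    clean = clean_net(biased)  # bias-free scan
    recon = clean * bias
    return torch.mean((recon - biased) ** 2)
```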
@inproceedings{benou2019deeptract,
title={Deeptract: A probabilistic deep learning framework for white matter fiber tractography},
author={Benou, Itay and Riklin Raviv, Tammy},
booktitle={International conference on medical image computing and computer-assisted intervention},
pages={626--635},
year={2019},
organization={Springer}
}
We present DeepTract, a deep-learning framework for estimating white matter fiber orientations and streamline tractography. We adopt a data-driven approach for fiber reconstruction from diffusion-weighted images (DWI), which does not assume a specific diffusion model. We use a recurrent neural network for mapping sequences of DWI values into probabilistic fiber orientation distributions. Based on these estimations, our model facilitates both deterministic and probabilistic streamline tractography. We quantitatively evaluate our method using the Tractometer tool, demonstrating competitive performance with state-of-the-art classical and machine-learning-based tractography algorithms. We further present qualitative results of bundle-specific probabilistic tractography obtained using our method.
@misc{arbelleweakly,
title={Weakly supervised microscopy cell segmentation via convolutional LSTM networks},
author={Arbelle, Assaf and Riklin Raviv, Tammy}
}
We address the segmentation of individual cells from microscopy sequences. The main challenge in this type of problem is not only foreground-background classification but also the separation of adjacent cells. We apply two orthogonal approaches to overcome the multiple instance problem. From the segmentation perspective, we adopt the three-class loss used by [1],[2]. The segmentation representation is designed to enhance the delineation of individual cells by partitioning the image domain into three classes: foreground, background and cell contours. From the detection perspective, we get our inspiration from [3], and aim to detect rough cell markers. The markers, as opposed to the full segmentation, do not cover the entire cell, but are rather a small “blob” somewhere within the cell. The markers have two desirable properties. First, they are much smaller than the object and thus are easier to separate between instances. One marker will never overlap or touch boundaries with a neighboring marker. Second, the markers are easy to annotate, as the annotator does not need to be precise, making data acquisition a simpler task. Often, for microscopy image sequences, the only available annotation is in the form of markers or approximate cell centers. We train the proposed network to estimate both the segmentation and the markers and merge the two using the Fast Marching Distance (FMD) [4]. The entire framework is illustrated in Figure 1.
@inproceedings{arbelle2018microscopy,
title={Microscopy cell segmentation via adversarial neural networks},
author={Arbelle, Assaf and Riklin Raviv, Tammy},
booktitle={2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)},
pages={645--648},
year={2018},
organization={IEEE}
}
We present a novel method for cell segmentation in microscopy images which is inspired by the Generative Adversarial Neural Network (GAN) approach. Our framework is built on a pair of competing artificial neural networks, with a unique architecture termed Rib Cage, which are trained simultaneously and together define a min-max game resulting in an accurate segmentation of a given image. Our approach has two main strengths: similar to the GAN, the method does not require a formulation of a loss function for the optimization process, and this allows training on a limited amount of annotated data in a weakly supervised manner. Promising segmentation results on real fluorescent microscopy data are presented.
@inproceedings{benou2018fiber,
title={Fiber-flux diffusion density for white matter tracts analysis: Application to mild anomalies localization in contact sports players},
author={Benou, Itay and Veksler, Ronel and Friedman, Alon and Riklin Raviv, Tammy},
booktitle={Computational Diffusion MRI: MICCAI Workshop, Quebec, Canada, 2018},
pages={191--204},
year={2018},
organization={Springer}
}
We present the concept of fiber-flux density for locally quantifying white matter (WM) fiber bundles. By combining scalar diffusivity measures (e.g., fractional anisotropy) with fiber-flux measurements, we define new local descriptors called Fiber-Flux Diffusion Density (FFDD) vectors. Applying each descriptor throughout fiber bundles allows along-tract coupling of a specific diffusion measure with geometrical properties, such as fiber orientation and coherence. A key step in the proposed framework is the construction of an FFDD dissimilarity measure for sub-voxel alignment of fiber bundles, based on the fast marching method (FMM). The obtained aligned WM tract profiles enable meaningful inter-subject comparisons and group-wise statistical analysis. We demonstrate our method using two different datasets of contact sports players. Along-tract pairwise comparison as well as group-wise analysis, with respect to non-player healthy controls, reveal significant and spatially-consistent FFDD anomalies. Comparing our method with along-tract FA analysis shows improved sensitivity to subtle structural anomalies in football players over standard FA measurements.
@inproceedings{shitrit2017accelerated,
title={Accelerated magnetic resonance imaging by adversarial neural network},
author={Shitrit, Ohad and Riklin Raviv, Tammy},
booktitle={International Workshop on Deep Learning in Medical Image Analysis},
pages={30--38},
year={2017},
organization={Springer}
}
A main challenge in Magnetic Resonance Imaging (MRI) for clinical applications is speeding up scan time. Beyond the improvement of patient experience and the reduction of operational costs, faster scans are essential for time-sensitive imaging, where target movement is unavoidable yet must be significantly lessened, e.g., fetal MRI, cardiac cine, and lung imaging. Moreover, short scan time can enhance temporal resolution in dynamic scans, such as functional MRI or dynamic contrast enhanced MRI. Current imaging methods facilitate MRI acquisition at the price of lower spatial resolution and costly hardware solutions.
We introduce a practical, software-only framework, based on deep learning, for accelerating MRI scan time while maintaining good imaging quality. This is accomplished by partial MRI sampling, while using an adversarial neural network to estimate the missing samples. The interplay between the generator and the discriminator networks enables the introduction of an adversarial cost in addition to a fidelity loss used for optimizing the peak signal-to-noise ratio (PSNR). Promising image reconstruction results are obtained for 1.5T MRI where only 52% of the original data are used.
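The acquisition model and the fidelity term can be sketched as follows (the mask pattern is an assumption, and the adversarial term is added separately): acquisition keeps a subset of k-space samples, and the generator is penalized wherever its k-space disagrees with the acquired samples.

```python
import torch

def undersample_kspace(image, mask):
    """Simulate accelerated acquisition: keep only the k-space samples
    selected by a binary mask (e.g., roughly half of the data)."""
    return torch.fft.fft2(image) * mask

def fidelity_loss(generated, k_measured, mask):
    """PSNR-oriented fidelity term: the generator's k-space must agree
    with the acquired samples at the sampled locations."""
    diff = (torch.fft.fft2(generated) - k_measured) * mask
    return torch.mean(torch.abs(diff) ** 2)
```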
@inproceedings{riklin2017multinomial,
title={Multinomial level-set framework for multi-region image segmentation},
author={Riklin Raviv, Tammy},
booktitle={International Conference on Scale Space and Variational Methods in Computer Vision},
pages={386--395},
year={2017},
organization={Springer}
}
We present a simple and elegant level-set framework for multi-region image segmentation. The key idea is based on replacing the traditional regularized Heaviside function with the multinomial logistic regression function, commonly known as Softmax. Segmentation is addressed by solving an optimization problem which considers the image intensity likelihood, a regularizer based on boundary smoothness, and a pairwise region-interaction term, which is naturally derived from the proposed formulation. We demonstrate our method on challenging multi-modal segmentation of MRI scans (4D) of brain tumor patients. Promising results are obtained for image partition into the different healthy brain tissues and the malignant regions.
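The core substitution is compact enough to state directly: given K level-set functions, the softmax turns them into soft region memberships that sum to one at every pixel. A minimal sketch:

```python
import numpy as np

def softmax_memberships(phi):
    """phi: (K, X, Y) array of K level-set functions. Replacing the
    regularized Heaviside with the softmax yields soft region memberships
    that sum to one at every pixel."""
    e = np.exp(phi - phi.max(axis=0, keepdims=True))  # numerically stable
    return e / e.sum(axis=0, keepdims=True)
```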
@inproceedings{benou2016noising,
title={De-noising of contrast-enhanced MRI sequences by an ensemble of expert deep neural networks},
author={Benou, Ariel and Veksler, Ronel and Friedman, Alon and Riklin Raviv, Tammy},
booktitle={International Workshop on Deep Learning in Medical Image Analysis},
pages={95--110},
year={2016},
organization={Springer}
}
Dynamic contrast-enhanced MRI (DCE-MRI) is an imaging protocol where MRI scans are acquired repetitively throughout the injection of a contrast agent. The analysis of dynamic scans is widely used for the detection and quantification of blood brain barrier (BBB) permeability. Extraction of the pharmacokinetic (PK) parameters from the DCE-MRI washout curves allows quantitative assessment of the BBB functionality. Nevertheless, curve fitting required for the analysis of DCE-MRI data is error-prone, as the dynamic scans are subject to non-white, spatially-dependent and anisotropic noise that does not fit standard noise models. The two existing approaches, i.e., curve smoothing and image de-noising, either produce smooth curves without guaranteeing fidelity to the PK model, or cannot accommodate the high variability in noise statistics over time and space.
We present a novel framework based on Deep Neural Networks (DNNs) to address the DCE-MRI de-noising challenges. The key idea is based on an ensemble of expert DNNs, where each is trained for different noise characteristics and curve prototypes to solve an inverse problem on a specific subset of the input space. The most likely reconstruction is then chosen using a classifier DNN. As ground-truth (clean) signals for training are not available, a model for generating realistic training sets with complex nonlinear dynamics is presented. The proposed approach has been applied to DCE-MRI scans of stroke and brain tumor patients and is shown to compare favorably to state-of-the-art de-noising methods, without degrading the contrast of the original images.
@inproceedings{gordon2016co,
title={Co-segmentation of multiple images into multiple regions: Application to mouse brain MRI},
author={Gordon, Shiri and Dolgopyat, Irit and Kahn, Itamar and Riklin Raviv, Tammy},
booktitle={2016 IEEE 13th International Symposium on Biomedical Imaging (ISBI)},
pages={399--402},
year={2016},
organization={IEEE}
}
Challenging biomedical segmentation problems can be addressed by combining top-down information based on the known anatomy along with bottom-up models of the image data. Anatomical priors can be provided by probabilistic atlases. Nevertheless, in many cases the available atlases are inadequate. We present a novel method for the co-segmentation of multiple images into multiple regions, where only a very few annotated examples exist. The underlying, unknown anatomy is learned throughout an interleaved process, in which the segmentation of a region is supported both by the segmentation of the neighboring regions which share common boundaries and by the segmentation of corresponding regions in the other jointly segmented images. The method is applied to a mouse brain MRI dataset for the segmentation of five anatomical structures. Experimental results demonstrate the segmentation accuracy with respect to the data complexity.
@inproceedings{shitrit2014probabilistic,
title={Probabilistic model for 3d interactive segmentation},
author={Shitrit, Ohad and Hershkovich, Tsachi and Shalmon, Tamar and Shelef, Ilan and Riklin Raviv, Tammy}
}
Fully-automated segmentation algorithms offer fast, objective, and reproducible results for large data collections. However, these techniques cannot handle tasks that require contextual knowledge not readily available in the images alone. Thus, the expertise of an experienced physician is necessary.
We present a generative approach to image segmentation, which supports an intuitive and convenient user interaction subject to the bottom-up constraints introduced by the image intensities. The user “dialogue” with the segmentation algorithm, via several mouse clicks in regions of disagreement, is formulated as an additional, spatial term in a global cost functional for 3D segmentation. The method is exemplified for the segmentation of cerebral hemorrhages (CH) in human brain CT scans.
@inproceedings{riklin2012statistical,
title={Statistical shape analysis for population studies via level-set based shape morphing},
author={Riklin Raviv, Tammy and Gao, Yi and Levitt, James J and Bouix, Sylvain},
booktitle={European Conference on Computer Vision},
pages={42--51},
year={2012},
organization={Springer}
}
We present a method that allows the detection, localization and quantification of statistically significant morphological differences in complex brain structures between populations. This is accomplished by a novel level-set framework for shape morphing and a multi-shape dissimilarity-measure derived by a modified version of the Hausdorff distance. The proposed method does not require explicit one-to-one point correspondences and is fast, robust and easy to implement regardless of the topological complexity of the anatomical surface under study.
The proposed model has been applied to different populations using a variety of brain structures, including the left and right striatum, caudate, amygdala-hippocampal complex and superior temporal gyrus (STG) in normal controls and patients. The synthetic databases allow quantitative evaluations of the proposed algorithm, while the results obtained for the real clinical data are in line with published findings on gray matter reduction in the tested cortical and sub-cortical structures in schizophrenia patients.
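For intuition, a generic averaged (modified) Hausdorff distance between two surface point sets, which is less outlier-sensitive than the classical max-min form, can be computed as below; this is a textbook variant, not necessarily the exact multi-shape measure used in the paper.

```python
import numpy as np
from scipy.spatial import cKDTree

def modified_hausdorff(A, B):
    """A: (n, 3), B: (m, 3) surface point sets. Returns the averaged
    (modified) Hausdorff distance: the larger of the two mean
    nearest-neighbor distances between the sets."""
    dAB = cKDTree(B).query(A)[0]  # distance from each point of A to B
    dBA = cKDTree(A).query(B)[0]  # distance from each point of B to A
    return max(dAB.mean(), dBA.mean())
```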
@article{arbellebgu,
title={BGU-IL (5) DESCRIPTION},
author={Arbelle, Assaf and Cohen, Shaked and Riklin Raviv, Tammy and Ben-Haim, Tal}
}
We address the segmentation of individual cells from microscopy sequences. The main challenge in this type of problem is not only foreground-background classification but also the separation of adjacent cells. We apply two orthogonal approaches to overcome the multiple instance problem. From the segmentation perspective, we adopt the three-class loss used by [1],[2]. The segmentation representation is designed to enhance the delineation of individual cells by partitioning the image domain into three classes: foreground, background and cell contours. From the detection perspective, we get our inspiration from [3], and aim to detect rough cell markers. The markers, as opposed to the full segmentation, do not cover the entire cell, but are rather a small “blob” somewhere within the cell. The markers have two desirable properties. First, they are much smaller than the object and thus are easier to separate between instances. One marker will never overlap or touch boundaries with a neighboring marker. Second, the markers are easy to annotate, as the annotator does not need to be precise, making data acquisition a simpler task. Often, for microscopy image sequences, the only available annotation is in the form of markers or approximate cell centers. We train the proposed network to estimate both the segmentation and the markers and merge the two using the Fast Marching Distance (FMD) [4]. The entire framework is illustrated in Figure 1.
@article{salvadoisbi,
title={ISBI organizing committee},
author={Salvado, Olivier and Egan, Gary and Barrutia, Arrate Munoz and Ordidge, Roger and Mousavi, Parvin and Zalesky, Andrew and Acosta, Oscar and Meriaudeau, Fabrice and Riklin Raviv, Tammy and Aylward, Stephen and others}
}
Provides a listing of current committee members and society officers.

