import pandas as pd
from faimed3d.all import *

d = pd.read_csv('../data/radiopaedia_cases.csv')
dls = ImageDataLoaders3D.from_df(d,
                                 fn_col = 0,
                                 label_col = 2,  # in the sample data 0: series, 1: segmentation mask, 2: random binary label
                                 #item_tfms = ResizeCrop3D(crop_by = (0., 0.1, 0.1), resize_to = (20, 150, 150), perc_crop = True),
                                 size_for_resampling = (112, 112, 20),
                                 bs = 2,
                                 val_bs = 2,
                                 num_workers = 0,
                                 )
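The result is a regular fastai DataLoaders object, so a quick batch inspection makes a useful sanity check. A minimal sketch (the exact shapes depend on size_for_resampling and bs):

xb, yb = dls.one_batch()
xb.shape, yb.shape  # a batch of resampled volumes and their binary labels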

CNN learner

The fastai cnn_learner function can be used to construct a 3D CNN with only a few modifications to the original fastai functions. See the faimed3d.layers notebook for details.

cnn_learner_3d[source]

cnn_learner_3d(dls, arch, loss_func=None, pretrained=True, cut=None, splitter=None, y_range=None, config=None, n_out=None, normalize=True, cbs=None, opt_func=Adam, lr=0.001, metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85, 0.95))

Build a convnet-style learner from `dls` and `arch`.
Same as the fastai function, but adds the `AddColorChannel` callback.
from torchvision.models.video import r3d_18

# pretrained turned off for more speed while testing, the default is to load a pretrained model
learn = cnn_learner_3d(dls, r3d_18, pretrained=False)
learn.fine_tune(1, 0.001)
epoch  train_loss  valid_loss  time
0      0.595849    0.742859    01:30

epoch  train_loss  valid_loss  time
0      0.505662    0.540008    02:11
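Because cnn_learner_3d returns an ordinary fastai Learner, the usual training utilities work unchanged. A sketch, assuming the random binary label from above and fastai's accuracy metric:

from fastai.metrics import accuracy

learn = cnn_learner_3d(dls, r3d_18, pretrained=False, metrics=accuracy)
learn.lr_find()           # inspect the loss curve to pick a learning rate
learn.fine_tune(1, 1e-3)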

UNet learner

create_unet_model_3d[source]

create_unet_model_3d(arch, n_out, img_size, n_in, pretrained=True, cut=None, blur=False, blur_final=True, self_attention=False, y_range=None, last_cross=True, bottle=False, act_cls=ReLU, init=kaiming_normal_, norm_type=None)

Create custom unet architecture
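The model can also be built directly, without a Learner. A minimal sketch, assuming a single-channel volume (n_in=1), two segmentation classes, and the (20, 100, 100) size used in the example below:

model = create_unet_model_3d(r3d_18, n_out=2, img_size=(20, 100, 100), n_in=1, pretrained=False)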

unet_learner_3d[source]

unet_learner_3d(dls, arch, normalize=True, n_out=None, pretrained=True, config=None, loss_func=None, opt_func=Adam, lr=0.001, splitter=None, cbs=None, metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85, 0.95), norm_type=NormType.Batch, **kwargs)

Build a unet learner from `dls` and `arch`
dls = SegmentationDataLoaders3D.from_df(d,
                                        codes = ['no covid', 'covid'],
                                        item_tfms = ResizeCrop3D(crop_by = (0., 0.1, 0.1), resize_to = (20, 100, 100), perc_crop = True),
                                        bs = 2, val_bs = 2)
learn = unet_learner_3d(dls, r3d_18, pretrained=False)
learn.fit_one_cycle(1, 0.001)
epoch  train_loss  valid_loss  time
0      0.584870    0.844796    00:07
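The result is again a standard fastai Learner, so predictions can be pulled and displayed with the usual helpers. A sketch:

preds, targets = learn.get_preds()  # predicted masks and targets for the validation set
learn.show_results()                # display some predictions next to their targets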

DeepLab learner

create_deeplab_model_3d[source]

create_deeplab_model_3d(arch, n_out, img_size, n_in, pretrained=True, cut=None, y_range=None, act_cls=ReLU, norm_type=NormType.Batch)

Create custom deeplab architecture
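As with the UNet, the DeepLab model can be constructed directly. A minimal sketch under the same assumptions as above (single-channel input, two classes):

model = create_deeplab_model_3d(r3d_18, n_out=2, img_size=(20, 100, 100), n_in=1, pretrained=False)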

deeplab_learner_3d[source]

deeplab_learner_3d(dls, arch, normalize=True, n_out=None, pretrained=True, loss_func=None, opt_func=Adam, lr=0.001, splitter=None, cbs=None, metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True, moms=(0.95, 0.85, 0.95), norm_type=NormType.Batch, **kwargs)

Build a deeplab learner from `dls` and `arch`
dls = SegmentationDataLoaders3D.from_df(d,
                                        codes = ['no covid', 'covid'],
                                        item_tfms = ResizeCrop3D(crop_by = (0., 0.1, 0.1), resize_to = (20, 100, 100), perc_crop = True),
                                        bs = 2, val_bs = 2)
learn = deeplab_learner_3d(dls, r3d_18, pretrained=False)
learn.fit_one_cycle(1, 0.001)
epoch  train_loss  valid_loss  time
0      0.546531    0.593489    00:02
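Once training looks reasonable, the Learner can be persisted with the standard fastai methods. A sketch (the file names are arbitrary):

learn.save('deeplab_r3d18')        # weights only, reloadable with learn.load('deeplab_r3d18')
learn.export('deeplab_r3d18.pkl')  # full inference pipeline, including the dataloader transforms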