saratan's People

Contributors

flettling, johrausch, mbickel, mohamed-ezz, patrickchrist


saratan's Issues

Pretrained model available?

Hi,
I searched through the repo and couldn't find any link to a pretrained model that I could run to reproduce the results. Pardon me if I overlooked it.

Need help training step 2 for lesions

I'm using the prototxt below; what's wrong? I'm not able to predict lesions:

name: "phseg_v5"
force_backward: true

layer {
name: "data"
type: "Python"
top: "data"
top: "label"
python_param {
module: "numpy_data_layer"
layer: "NumpyTrainDataLayer"
}
include: { phase: TRAIN }
}

layer {
name: "data"
type: "Python"
top: "data"
top: "label"
python_param {
module: "numpy_data_layer"
layer: "NumpyTestDataLayer"
}
include: { phase: TEST }
}

layer {
name: "conv_d0a-b"
type: "Convolution"
bottom: "data"
top: "d0b"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d0b"
type: "ReLU"
bottom: "d0b"
top: "d0b"
}
layer {
name: "conv_d0b-c"
type: "Convolution"
bottom: "d0b"
top: "d0c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d0c"
type: "ReLU"
bottom: "d0c"
top: "d0c"
}
layer {
name: "pool_d0c-1a"
type: "Pooling"
bottom: "d0c"
top: "d1a"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv_d1a-b"
type: "Convolution"
bottom: "d1a"
top: "d1b"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d1b"
type: "ReLU"
bottom: "d1b"
top: "d1b"
}
layer {
name: "conv_d1b-c"
type: "Convolution"
bottom: "d1b"
top: "d1c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d1c"
type: "ReLU"
bottom: "d1c"
top: "d1c"
}
layer {
name: "pool_d1c-2a"
type: "Pooling"
bottom: "d1c"
top: "d2a"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv_d2a-b"
type: "Convolution"
bottom: "d2a"
top: "d2b"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d2b"
type: "ReLU"
bottom: "d2b"
top: "d2b"
}
layer {
name: "conv_d2b-c"
type: "Convolution"
bottom: "d2b"
top: "d2c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d2c"
type: "ReLU"
bottom: "d2c"
top: "d2c"
}
layer {
name: "pool_d2c-3a"
type: "Pooling"
bottom: "d2c"
top: "d3a"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv_d3a-b"
type: "Convolution"
bottom: "d3a"
top: "d3b"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d3b"
type: "ReLU"
bottom: "d3b"
top: "d3b"
}
layer {
name: "conv_d3b-c"
type: "Convolution"
bottom: "d3b"
top: "d3c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d3c"
type: "ReLU"
bottom: "d3c"
top: "d3c"
}

layer {
name: "pool_d3c-4a"
type: "Pooling"
bottom: "d3c"
top: "d4a"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv_d4a-b"
type: "Convolution"
bottom: "d4a"
top: "d4b"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1024
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d4b"
type: "ReLU"
bottom: "d4b"
top: "d4b"
}
layer {
name: "conv_d4b-c"
type: "Convolution"
bottom: "d4b"
top: "d4c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1024
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "relu_d4c"
type: "ReLU"
bottom: "d4c"
top: "d4c"
}

layer {
name: "upconv_d4c_u3a"
type: "Deconvolution"
bottom: "d4c"
top: "u3a"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 0
kernel_size: 2
stride: 2
weight_filler {
type: "xavier"
}
}
}

layer {
name: "relu_u3a"
type: "ReLU"
bottom: "u3a"
top: "u3a"
}
layer {
name: "crop_d3c-d3cc"
type: "Crop"
bottom: "d3c"
bottom: "u3a"
top: "d3cc"

}
layer {
name: "concat_d3cc_u3a-b"
type: "Concat"
bottom: "u3a"
bottom: "d3cc"
top: "u3b"
}
layer {
name: "conv_u3b-c"
type: "Convolution"
bottom: "u3b"
top: "u3c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}
layer {
name: "relu_u3c"
type: "ReLU"
bottom: "u3c"
top: "u3c"
}
layer {
name: "conv_u3c-d"
type: "Convolution"
bottom: "u3c"
top: "u3d"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}
layer {
name: "relu_u3d"
type: "ReLU"
bottom: "u3d"
top: "u3d"
}
layer {
name: "upconv_u3d_u2a"
type: "Deconvolution"
bottom: "u3d"
top: "u2a"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 0
kernel_size: 2
stride: 2
weight_filler {
type: "xavier"
}
}
}
layer {
name: "relu_u2a"
type: "ReLU"
bottom: "u2a"
top: "u2a"
}
layer {
name: "crop_d2c-d2cc"
type: "Crop"
bottom: "d2c"
bottom: "u2a"
top: "d2cc"

}
layer {
name: "concat_d2cc_u2a-b"
type: "Concat"
bottom: "u2a"
bottom: "d2cc"
top: "u2b"
}
layer {
name: "conv_u2b-c"
type: "Convolution"
bottom: "u2b"
top: "u2c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}
layer {
name: "relu_u2c"
type: "ReLU"
bottom: "u2c"
top: "u2c"
}
layer {
name: "conv_u2c-d"
type: "Convolution"
bottom: "u2c"
top: "u2d"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}
layer {
name: "relu_u2d"
type: "ReLU"
bottom: "u2d"
top: "u2d"
}
layer {
name: "upconv_u2d_u1a"
type: "Deconvolution"
bottom: "u2d"
top: "u1a"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 0
kernel_size: 2
stride: 2
weight_filler {
type: "xavier"
}
}
}
layer {
name: "relu_u1a"
type: "ReLU"
bottom: "u1a"
top: "u1a"
}
layer {
name: "crop_d1c-d1cc"
type: "Crop"
bottom: "d1c"
bottom: "u1a"
top: "d1cc"

}
layer {
name: "concat_d1cc_u1a-b"
type: "Concat"
bottom: "u1a"
bottom: "d1cc"
top: "u1b"
}
layer {
name: "conv_u1b-c"
type: "Convolution"
bottom: "u1b"
top: "u1c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}
layer {
name: "relu_u1c"
type: "ReLU"
bottom: "u1c"
top: "u1c"
}
layer {
name: "conv_u1c-d"
type: "Convolution"
bottom: "u1c"
top: "u1d"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}
layer {
name: "relu_u1d"
type: "ReLU"
bottom: "u1d"
top: "u1d"
}
layer {
name: "upconv_u1d_u0a_NEW"
type: "Deconvolution"
bottom: "u1d"
top: "u0a"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 2
stride: 2
weight_filler {
type: "xavier"
}
}
}
layer {
name: "relu_u0a"
type: "ReLU"
bottom: "u0a"
top: "u0a"
}
layer {
name: "crop_d0c-d0cc"
type: "Crop"
bottom: "d0c"
bottom: "u0a"
top: "d0cc"

}
layer {
name: "concat_d0cc_u0a-b"
type: "Concat"
bottom: "u0a"
bottom: "d0cc"
top: "u0b"
}
layer {
name: "conv_u0b-c_New"
type: "Convolution"
bottom: "u0b"
top: "u0c"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}
layer {
name: "relu_u0c"
type: "ReLU"
bottom: "u0c"
top: "u0c"
}
layer {
name: "conv_u0c-d_New"
type: "Convolution"
bottom: "u0c"
top: "u0d"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 0
kernel_size: 3
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}
layer {
name: "relu_u0d"
type: "ReLU"
bottom: "u0d"
top: "u0d"
}
layer {
name: "conv_u0d-score_New"
type: "Convolution"
bottom: "u0d"
top: "score"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 2
pad: 0
kernel_size: 1
weight_filler {
type: "xavier"
}
engine: CAFFE
}
}

layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "score"
bottom: "label"
top: "loss"
include {
phase: TRAIN
}
softmax_param {engine: CAFFE}
loss_param {
weight_by_label_freqs: true
ignore_label: 2
class_weighting: 0.25
class_weighting: 9.664
}

}
layer {
name: "prob"
type: "Softmax"
bottom: "score"
top: "prob"
include {
phase: TEST
}
}

layer {
name: "accuracy"
type: "Accuracy"
bottom: "score"
bottom: "label"
top: "accuracy"
accuracy_param {
ignore_label: 2
}
include {
phase: TEST
}
}
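
One thing worth checking, assuming the ground-truth labels follow the common convention 0 = background, 1 = liver, 2 = lesion (an assumption, not something stated in the issue): with ignore_label: 2 in the loss, all lesion voxels would be excluded from training, so the network never sees positive lesion examples. For the second cascade step the labels are usually remapped so that lesion becomes the foreground class and everything outside the liver is ignored. A minimal numpy sketch of such a remapping:

import numpy as np

def remap_labels_for_step2(label_slice):
    # Remap a combined label slice (assumed 0=background, 1=liver, 2=lesion)
    # to a step-2 lesion target: 0=healthy liver, 1=lesion, 2=ignored (outside liver).
    out = np.full_like(label_slice, 2)   # everything outside the liver is ignored by the loss
    out[label_slice == 1] = 0            # healthy liver tissue becomes the background class
    out[label_slice == 2] = 1            # lesion becomes the foreground class
    return out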

About CRF's parameter tuning

Hi Mohamed,
I noticed that you use NLopt to tune the CRF's parameters. When I used this library to tune the parameters, I got bad results.
Using your CRF parameters after liver inference gave a good result, but when I tuned the parameters myself the Dice only approached the Dice obtained without the CRF. The same happened for lesion inference.
I would like to know why. Thank you! @mohamed-ezz
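
For readers hitting the same issue, below is a minimal sketch of how CRF parameters might be tuned with NLopt's Python bindings. The three parameters, their bounds, and the run_crf_and_dice helper are hypothetical placeholders rather than the repository's actual tuning setup:

import nlopt
import numpy as np

def objective(params, grad):
    # grad is unused: BOBYQA is a derivative-free local optimizer
    pos_std, bilateral_std, weight = params
    return run_crf_and_dice(pos_std, bilateral_std, weight)   # hypothetical helper returning mean Dice on a tuning set

opt = nlopt.opt(nlopt.LN_BOBYQA, 3)       # 3 CRF parameters, derivative-free local search
opt.set_max_objective(objective)          # maximize tuning-set Dice
opt.set_lower_bounds([0.5, 0.5, 0.1])
opt.set_upper_bounds([10.0, 50.0, 20.0])
opt.set_maxeval(50)
best_params = opt.optimize(np.array([3.0, 10.0, 5.0]))
print("best Dice:", opt.last_optimum_value(), "params:", best_params)

If tuning makes results worse, the optimizer may simply be overfitting a small tuning set or converging to a poor local optimum; comparing both parameter sets on a held-out volume helps tell the two apart.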

Using CRF after net2

Hi,
I have been studying your code recently. I added the CRF after net 1 and got a good result.
But when I add the CRF after net 2 I get {'voe': 1.0, 'dice': 0.0, 'rvd': -1.0, 'assd': 0, 'jaccard': 0.0, 'msd': 0}.
Could you please tell me why, or would you mind sharing your CRF post-processing code for net 2?

Thanks
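
A metrics dictionary like the one above (Dice 0, VOE 1) means the post-CRF mask has no overlap with the ground truth at all, i.e. the CRF removed the (already small) lesion prediction entirely. A quick way to confirm this, using medpy as an assumed metrics backend and placeholder variable names (the repository's own evaluation code may differ):

import numpy as np
from medpy.metric.binary import dc, jc, ravd

pred = crf_output > 0          # binary lesion mask after the CRF (placeholder variable)
gt = ground_truth > 0          # binary lesion ground truth (placeholder variable)

print("non-zero voxels in prediction:", int(pred.sum()))   # 0 here would explain dice == 0.0
if pred.any() and gt.any():
    print("dice:", dc(pred, gt), "jaccard:", jc(pred, gt), "rvd:", ravd(pred, gt))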

How to make sure the liver is always on the left?

Hello,
I read your code and noticed that the function step1_preprocess_img_slice contains the comment below:
1- Rotate the input volume so that the liver is on the left and the spine is at the bottom of the image
I would like to know how you can make sure the liver is always on the left. I would appreciate your help. Thanks.
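
For context, once a CT volume has been reoriented to a canonical anatomical orientation, the liver ends up on a fixed side of the image simply because of human anatomy, so a single fixed rotation/flip per dataset is enough. A sketch of such a normalization with nibabel and numpy, as a general recipe rather than the repository's exact preprocessing:

import nibabel as nib
import numpy as np

img = nib.load("volume.nii")                  # placeholder input path
canonical = nib.as_closest_canonical(img)     # reorient to RAS using the NIfTI affine
volume = canonical.get_fdata()

# With a consistent RAS orientation, one fixed in-plane transform per dataset
# places the spine at the bottom and the liver on the same side in every slice.
middle_slice = np.rot90(volume[:, :, volume.shape[2] // 2])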

No training code is available?

Hello. I want to use your net to train on my own dataset from scratch, but no training code seems to be available. Please tell me how to configure this net for my own training.

Thanks
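
For what it's worth, training a Caffe network defined by a prototxt like the one in the earlier issue only requires a solver definition plus a few lines of pycaffe. The file names below are placeholders, and any custom layers (the Python data layer, the weighted softmax loss) must be available in your Caffe build:

import caffe

caffe.set_device(0)
caffe.set_mode_gpu()

# solver.prototxt is a placeholder solver file whose "net" field points at the training prototxt.
solver = caffe.SGDSolver("solver.prototxt")

# Optionally initialize from existing weights before training:
# solver.net.copy_from("pretrained.caffemodel")

solver.solve()   # run until max_iter from the solver definition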

Why did you use thresholding?

Hi, I have read your source code and your paper; both were very useful.
I saw that you used thresholding in the last part of network training, but I couldn't find anything about it in the paper.
Could you please explain why you used thresholding and which type of thresholding you used?
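
In case it helps other readers, thresholding in this context usually means binarizing the network's softmax probability map before any post-processing; a minimal sketch, where the channel layout and the 0.5 cutoff are assumptions rather than values taken from the paper:

import numpy as np

# prob is assumed to have shape (2, H, W): channel 0 = background, channel 1 = foreground
foreground_prob = prob[1]
mask = (foreground_prob > 0.5).astype(np.uint8)   # simple global threshold on the probability map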

CRF returning all 0's

When I give the CRF an image of shape (W, H, D) and its probability mask of shape (W, H, D, L) with L = 1, I get a matrix of all zeros. I also tried running the CRF optimizer, but I got a matrix of zeros for every iteration.
Can you help me with this? Also, can this CRF be used as a general segmentation-refinement post-processing step for datasets other than the liver?
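
Regarding the L = 1 probability mask: a dense CRF needs a probability (and therefore a unary potential) for every label, including background, so a single-channel input gives degenerate unaries and the inference collapses to all zeros. A minimal 2D sketch with pydensecrf, assuming a two-channel softmax output and placeholder variables softmax and image_rgb (a generic example, not the repository's 3D CRF code):

import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

# softmax: float array of shape (2, H, W) with background and foreground probabilities summing to 1
# image_rgb: uint8 array of shape (H, W, 3) used for the appearance kernel
unary = unary_from_softmax(softmax)                      # shape (2, H*W), negative log-probabilities
d = dcrf.DenseCRF2D(softmax.shape[2], softmax.shape[1], 2)
d.setUnaryEnergy(np.ascontiguousarray(unary))

d.addPairwiseGaussian(sxy=3, compat=3)                                   # smoothness term
d.addPairwiseBilateral(sxy=60, srgb=10, rgbim=image_rgb, compat=5)       # appearance term

Q = d.inference(5)
refined = np.argmax(Q, axis=0).reshape(softmax.shape[1], softmax.shape[2])

Nothing in this formulation is liver-specific, so in principle the same post-processing can be applied to other segmentation tasks.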

How to read your code?

I only have the 3D-IRCADb dataset.
(1) How should I process my data so that I can train?
(2) How should I read the '/data' and '/data/layers' folders?
I only understand the function of numpy_data_layer.py.
Because the 3D-IRCADb dataset does not contain data in .nii format, I commented out the following code in irca_to_nii.py:
# image_filename = os.path.join(volume_fulldirname,"image"+volume_id+".nii")
# flip_volume(image_filename, os.path.join(OUTPUT_PATH, "image%.2d"%int(volume_id)+".nii"))

(3) What is the link to the fire3 dataset?
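
Since numpy_data_layer.py is a Caffe Python layer, it helps to know the interface Caffe expects from such a layer. Below is a generic skeleton; the class name matches the one referenced in the prototxt above, but the batch shapes and the zero-filled data are illustrative placeholders, not the repository's actual implementation:

import caffe
import numpy as np

class NumpyTrainDataLayer(caffe.Layer):
    # Generic Caffe Python data layer skeleton: Caffe calls setup/reshape/forward each iteration.

    def setup(self, bottom, top):
        # top[0] = data, top[1] = label; the shapes here are illustrative
        top[0].reshape(1, 1, 572, 572)
        top[1].reshape(1, 1, 388, 388)

    def reshape(self, bottom, top):
        pass                                  # shapes are fixed in setup

    def forward(self, bottom, top):
        # a real layer would load one (image, label) pair here, e.g. from .npy files on disk
        top[0].data[...] = np.zeros((1, 1, 572, 572), dtype=np.float32)
        top[1].data[...] = np.zeros((1, 1, 388, 388), dtype=np.float32)

    def backward(self, top, propagate_down, bottom):
        pass                                  # data layers have no gradient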

How to prepare the training data?

I followed the script data/irca_to_nii.py.
I downloaded the 3D-IRCADb-01 dataset and ran this script,
but there are some errors.

➜  3Dircadb1 python ../CFCN/saratan/data/irca_to_nii.py
Volume 3Dircadb1.1
	 Flipping left-right ./3Dircadb1.1/image1.nii to ./niftis_segmented_all/image01.nii
Traceback (most recent call last):
  File "../CFCN/saratan/data/irca_to_nii.py", line 86, in <module>
    flip_volume(image_filename, os.path.join(OUTPUT_PATH, "image%.2d"%int(volume_id)+".nii"))
  File "../CFCN/saratan/data/irca_to_nii.py", line 27, in flip_volume
    volume = nibabel.load(input_filename).get_data()
  File "/usr/local/lib/python2.7/dist-packages/nibabel/loadsave.py", line 40, in load
    raise FileNotFoundError("No such file: '%s'" % filename)
nibabel.py3k.FileNotFoundError: No such file: './3Dircadb1.1/image1.nii'

So, where is the 3Dircadb1.1/image1.nii file?
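
The 3D-IRCADb download ships DICOM series rather than NIfTI volumes, so image1.nii has to be created before irca_to_nii.py can flip it. A sketch of one way to do that conversion with SimpleITK, where the paths are placeholders and the repository may expect a different tool or naming scheme:

import SimpleITK as sitk

# Placeholder paths: the unpacked DICOM folder of patient 1 and the file name the script looks for.
dicom_dir = "./3Dircadb1.1/PATIENT_DICOM"
output_path = "./3Dircadb1.1/image1.nii"

reader = sitk.ImageSeriesReader()
series_files = reader.GetGDCMSeriesFileNames(dicom_dir)   # ordered DICOM slices of the CT volume
reader.SetFileNames(series_files)
volume = reader.Execute()

sitk.WriteImage(volume, output_path)                       # write the stacked volume as a NIfTI file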
