
Comments (16)

kavilivishnu commented on June 27, 2024

@MAlmuzaini,
The build_unet function accepts only 2 arguments.

1st argument - The shape of the input as a tuple - (H, W)
2nd argument - Number of dimensions / number of classes - 3

So, you could do:

input = (H, W), which in your case would be (512, 512).
dims = 3

Then call the function:

model = build_unet(input, dims)

This could solve the issue at least to a certain degree.

Let me know.

Thanks.


MAlmuzaini commented on June 27, 2024

I tried this code:
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import numpy as np
import cv2
from glob import glob
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import Recall, Precision
from model import build_unet
from metrics import dice_loss, dice_coef, iou

""" Global parameters """
H = 1760
W = 1760

input = (H, W)
dims = 3

def create_dir(path):
    """ Create a directory. """
    if not os.path.exists(path):
        os.makedirs(path)

def load_data(path, split=0.1):
    images = sorted(glob(os.path.join(path, "Test_Images", "*.png")))
    masks1 = sorted(glob(os.path.join(path, "Test_Mask", "Left_Mask", "*.png")))
    masks2 = sorted(glob(os.path.join(path, "Test_Mask", "Right_Mask", "*.png")))

    split_size = int(len(images) * split)

    train_x, valid_x = train_test_split(images, test_size=split_size, random_state=42)
    train_y1, valid_y1 = train_test_split(masks1, test_size=split_size, random_state=42)
    train_y2, valid_y2 = train_test_split(masks2, test_size=split_size, random_state=42)

    train_x, test_x = train_test_split(train_x, test_size=split_size, random_state=42)
    train_y1, test_y1 = train_test_split(train_y1, test_size=split_size, random_state=42)
    train_y2, test_y2 = train_test_split(train_y2, test_size=split_size, random_state=42)

    return (train_x, train_y1, train_y2), (valid_x, valid_y1, valid_y2), (test_x, test_y1, test_y2)

def read_image(path):
    x = cv2.imread(path, cv2.IMREAD_COLOR)
    x = cv2.resize(x, (W, H))
    x = x/255.0
    x = x.astype(np.float32)
    return x

def read_mask(path1, path2):
    x1 = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    x2 = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    x = x1 + x2
    x = cv2.resize(x, (W, H))
    x = x/np.max(x)
    x = x > 0.5
    x = x.astype(np.float32)
    x = np.expand_dims(x, axis=-1)
    return x

def tf_parse(x, y1, y2):
    def _parse(x, y1, y2):
        x = x.decode()
        y1 = y1.decode()
        y2 = y2.decode()

        x = read_image(x)
        y = read_mask(y1, y2)
        return x, y

    x, y = tf.numpy_function(_parse, [x, y1, y2], [tf.float32, tf.float32])
    x.set_shape([H, W, 3])
    y.set_shape([H, W, 1])
    return x, y

def tf_dataset(X, Y1, Y2, batch=8):
    dataset = tf.data.Dataset.from_tensor_slices((X, Y1, Y2))
    dataset = dataset.shuffle(buffer_size=200)
    dataset = dataset.map(tf_parse)
    dataset = dataset.batch(batch)
    dataset = dataset.prefetch(4)
    return dataset

if __name__ == "__main__":
    """ Seeding """
    np.random.seed(42)
    tf.random.set_seed(42)

    """ Directory for storing files """
    create_dir("files")

    """ Hyperparameters """
    batch_size = 2
    lr = 1e-5
    num_epochs = 10
    model_path = os.path.join("files", "model.keras")
    csv_path = os.path.join("files", "data.csv")

    """ Dataset """
    dataset_path = "Lung"
    (train_x, train_y1, train_y2), (valid_x, valid_y1, valid_y2), (test_x, test_y1, test_y2) = load_data(dataset_path)

    print(f"Train: {len(train_x)} - {len(train_y1)} - {len(train_y2)}")
    print(f"Valid: {len(valid_x)} - {len(valid_y1)} - {len(valid_y2)}")
    print(f"Test: {len(test_x)} - {len(test_y1)} - {len(test_y2)}")

    train_dataset = tf_dataset(train_x, train_y1, train_y2, batch=batch_size)
    valid_dataset = tf_dataset(valid_x, valid_y1, valid_y2, batch=batch_size)

    """ Model """
    model = build_unet(input_shape=(input, dims))
    metrics = [dice_coef, iou, Recall(), Precision()]
    model.compile(loss=dice_loss, optimizer=Adam(lr), metrics=metrics)

    callbacks = [
        ModelCheckpoint(model_path, verbose=1, save_best_only=True),
        ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, min_lr=1e-7, verbose=1),
        CSVLogger(csv_path)
    ]

    model.fit(
        train_dataset,
        epochs=num_epochs,
        validation_data=valid_dataset,
        callbacks=callbacks
    )

and I specified the dimensions as 1760, since that is the size of my images. However, this error appears:

" Train: 13 - 13 - 13
Valid: 1 - 1 - 1
Test: 1 - 1 - 1
Traceback (most recent call last):
File "C:\‏‏UNET\train4.py", line 111, in
model = build_unet((input, dims))
^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\‏‏UNET\model.py", line 27, in build_unet
inputs = Input(input_shape)
^^^^^^^^^^^^^^^^^^
File "C:\Users\mawadh\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\src\layers\core\input_layer.py", line 143, in Input
layer = InputLayer(
^^^^^^^^^^^
File "C:\Users\mawadh\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\src\layers\core\input_layer.py", line 46, in init
shape = backend.standardize_shape(shape)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\mawadh\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\src\backend\common\variables.py", line 549, in standardize_shape
if not is_int_dtype(type(e)):
^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\mawadh\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\src\backend\common\variables.py", line 580, in is_int_dtype
dtype = standardize_dtype(dtype)
^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\mawadh\AppData\Local\Programs\Python\Python311\Lib\site-packages\keras\src\backend\common\variables.py", line 521, in standardize_dtype
raise ValueError(f"Invalid dtype: {dtype}")
ValueError: Invalid dtype: tuple"
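
For what it's worth, the traceback points at the shape itself: Keras' Input expects a flat tuple of integers (or None for unknown dimensions), so a nested tuple such as ((1760, 1760), 3) makes standardize_shape() hit a tuple where it expects an int. A minimal sketch of the difference (plain Keras, nothing specific to this repo):

from tensorflow.keras.layers import Input

# Fails: the first element of the shape is itself a tuple,
# so Keras raises "Invalid dtype: tuple".
# x = Input(((1760, 1760), 3))

# Works: a flat (height, width, channels) tuple of integers.
x = Input((1760, 1760, 3))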


kavilivishnu commented on June 27, 2024

Hi @MAlmuzaini,

I believe the representation that you have given is slightly different from the standard one.

Only the input_shape has to be passed as a tuple to build_unet().
The dimensions have to be passed as a plain number.

I see that there is a slight syntax error in the code:

In the code that you have shared:

model = build_unet(input_shape=(input, dims))

The standard way:

input = (1760, 1760)
dims = 3
model = build_unet((input), dims)

Therefore,

  1. The first argument build_unet takes is the input_shape. It must be a tuple.
  2. The second argument will be just a number. It has to be of integer type.

Make sure to pass the arguments correctly.

Visit this page to get a thorough understanding of U-Net and how it works - https://github.com/christianversloot/machine-learning-articles/blob/main/how-to-build-a-u-net-for-image-segmentation-with-tensorflow-and-keras.md

That will help.

Let me know if you have any questions.

Thanks.


MAlmuzaini commented on June 27, 2024

Thanks for your response. I used the standard way "model = build_unet((input), dims)" and also tried "model = build_unet(input, dims)",

but I got this error:

Train: 13 - 13 - 13
Valid: 1 - 1 - 1
Test: 1 - 1 - 1
Traceback (most recent call last):
  File "C:\UNET\train4.py", line 111, in <module>
    model = build_unet((input), dims)
            ^^^^^^^^^^^^^^^^^^^^^^^^^
TypeError: build_unet() takes 1 positional argument but 2 were given
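
That TypeError points at the signature rather than at how the arguments are wrapped: a one-parameter build_unet rejects any second positional argument, while wrapping everything into a single tuple gets past the call and instead trips the earlier Invalid dtype error inside Input. A small sketch (the one-parameter definition here is an assumption based on the error message, not the repo's actual code):

from tensorflow.keras.layers import Input

def build_unet(input_shape):        # hypothetical: a single parameter, as the TypeError implies
    inputs = Input(input_shape)
    ...

# build_unet((1760, 1760), 3)    -> TypeError: takes 1 positional argument but 2 were given
# build_unet(((1760, 1760), 3))  -> accepted as one argument, but Input() then finds a tuple
#                                   inside the shape and raises "Invalid dtype: tuple"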


kavilivishnu commented on June 27, 2024

So, you see the same error even when you did - model = build_unet(input, dims)?


MAlmuzaini commented on June 27, 2024


kavilivishnu commented on June 27, 2024

Okay. Can you send me a screenshot of the error (if you are okay with it)?


MAlmuzaini commented on June 27, 2024

Yes, sure.
[Screenshot 2024-06-16 225301]


kavilivishnu commented on June 27, 2024

Can you share the build_unet function here?


MAlmuzaini commented on June 27, 2024

def build_unet(input_shape):
    inputs = Input(input_shape)

    s1, p1 = encoder_block(inputs, 64)
    s2, p2 = encoder_block(p1, 128)
    s3, p3 = encoder_block(p2, 256)
    s4, p4 = encoder_block(p3, 512)

    b1 = conv_block(p4, 1024)

    d1 = decoder_block(b1, s4, 512)
    d2 = decoder_block(d1, s3, 256)
    d3 = decoder_block(d2, s2, 128)
    d4 = decoder_block(d3, s1, 64)

    outputs = Conv2D(1, 1, padding="same", activation="sigmoid")(d4)

    model = Model(inputs, outputs, name="U-Net")
    return model
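
Given that definition, the fix is likely just to fold the channel count into the shape and drop the second argument: the 3 is the RGB channel count of the images (the tf_parse above already does x.set_shape([H, W, 3])), not a number of classes, and the output stays a single sigmoid channel to match the (H, W, 1) masks. A sketch against the training script above:

# One flat (height, width, channels) tuple; no separate dims argument.
model = build_unet((1760, 1760, 3))
model.summary()   # should show input (None, 1760, 1760, 3) and output (None, 1760, 1760, 1)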


kavilivishnu commented on June 27, 2024

Yes. As you can see, the build_unet function itself does not accept a 2nd argument. We would have to add the 2nd argument (the dimensions) and then integrate that value in the function.
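
One possible way to do that, as a sketch only (encoder_block, conv_block and decoder_block are assumed to be the helpers from the same model.py, and the extra parameter name num_classes is ours): give build_unet a second parameter and use it in the output layer.

def build_unet(input_shape, num_classes=1):
    inputs = Input(input_shape)

    s1, p1 = encoder_block(inputs, 64)
    s2, p2 = encoder_block(p1, 128)
    s3, p3 = encoder_block(p2, 256)
    s4, p4 = encoder_block(p3, 512)

    b1 = conv_block(p4, 1024)

    d1 = decoder_block(b1, s4, 512)
    d2 = decoder_block(d1, s3, 256)
    d3 = decoder_block(d2, s2, 128)
    d4 = decoder_block(d3, s1, 64)

    # num_classes output channels; sigmoid for a single binary mask, softmax otherwise
    activation = "sigmoid" if num_classes == 1 else "softmax"
    outputs = Conv2D(num_classes, 1, padding="same", activation=activation)(d4)

    model = Model(inputs, outputs, name="U-Net")
    return model

For the binary lung masks in the script above, calling build_unet((H, W, 3)) with the default num_classes=1 keeps the behaviour identical to the original function.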

Did you write the function or adopt it from elsewhere?


MAlmuzaini commented on June 27, 2024


kavilivishnu commented on June 27, 2024

Okay. That makes sense.

Are you able to find any proper documentation about their implementation or the code that they used this function in?


MAlmuzaini commented on June 27, 2024

