
I am trying to train a network for bounding box regression. I've created a pd.DataFrame that holds, for each sample, the image file path, the image size, and the bounding-box coordinates (screenshot omitted).

Here are my train and validation image generators:

image_generator = tf.keras.preprocessing.image.ImageDataGenerator(
                                  rescale = 1./255,
                                  rotation_range = 25,
                                  zoom_range=[0.8, 1.2],
                                  vertical_flip=True,
                                  horizontal_flip=True,
                                  )
train_generator = image_generator.flow_from_dataframe(
            dataframe=train_df,
            directory=cbis_ddsm_train_images_dir,
            x_col="image file path",
            y_col="coordinates",
            class_mode="raw",
            batch_size=BATCH_SIZE,
            shuffle=True,
            seed=1,
            color_mode="grayscale",
            target_size=(1024, 1024))

val_gen = tf.keras.preprocessing.image.ImageDataGenerator(
                                  rescale = 1./255,
                                  )

val_generator = val_gen.flow_from_dataframe(
            dataframe=val_df,
            directory=cbis_ddsm_train_images_dir,
            x_col="image file path",
            y_col=None,
            class_mode="raw",
            batch_size=BATCH_SIZE,
            shuffle=False,
            seed=1,
            color_mode="grayscale",
            target_size=(1024, 1024))

Please note that I've converted the Y column (i.e. the bbox coordinates) from lists into NumPy arrays as follows:

for idx, row in train_df.iterrows():
  height, width = row['size']
  row['coordinates'] = np.asarray([normalize_bbox(c, height, width) for c in row['coordinates']][0]).astype('float32')

When I try to execute code below:

opt = Adam(lr=INIT_LR)
final_model.compile(optimizer=opt, loss="mse")

final_model.fit(train_generator, steps_per_epoch=steps_per_epoch, epochs=3,
                validation_data=val_generator, validation_steps=val_steps, verbose=1)

I am getting the following error message:

ValueError                                Traceback (most recent call last)
<ipython-input-46-90f3a1cd9c87> in <module>()
      8 
      9     final_model.fit(train_generator, steps_per_epoch=steps_per_epoch, epochs=3,
---> 10                     validation_data=val_generator, validation_steps=val_steps, verbose=1)
     11     final_model.save(os.path.join(experiment1_dir, "resnet_fine-tuned-head.h5"))
     12 

14 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
    106   def _method_wrapper(self, *args, **kwargs):
    107     if not self._in_multi_worker_mode():  # pylint: disable=protected-access
--> 108       return method(self, *args, **kwargs)
    109 
    110     # Running inside `run_distribute_coordinator` already.

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1061           use_multiprocessing=use_multiprocessing,
   1062           model=self,
-> 1063           steps_per_execution=self._steps_per_execution)
   1064 
   1065       # Container that configures and calls `tf.keras.Callback`s.

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weight, batch_size, steps_per_epoch, initial_epoch, epochs, shuffle, class_weight, max_queue_size, workers, use_multiprocessing, model, steps_per_execution)
   1115         use_multiprocessing=use_multiprocessing,
   1116         distribution_strategy=ds_context.get_strategy(),
-> 1117         model=model)
   1118 
   1119     strategy = ds_context.get_strategy()

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weights, shuffle, workers, use_multiprocessing, max_queue_size, model, **kwargs)
    914         max_queue_size=max_queue_size,
    915         model=model,
--> 916         **kwargs)
    917 
    918   @staticmethod

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weights, workers, use_multiprocessing, max_queue_size, model, **kwargs)
    786     peek, x = self._peek_and_restore(x)
    787     peek = self._standardize_batch(peek)
--> 788     peek = _process_tensorlike(peek)
    789 
    790     # Need to build the Model on concrete input shapes.

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py in _process_tensorlike(inputs)
   1019     return x
   1020 
-> 1021   inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)
   1022   return nest.list_to_tuple(inputs)
   1023 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
    633 
    634   return pack_sequence_as(
--> 635       structure[0], [func(*x) for x in entries],
    636       expand_composites=expand_composites)
    637 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
    633 
    634   return pack_sequence_as(
--> 635       structure[0], [func(*x) for x in entries],
    636       expand_composites=expand_composites)
    637 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/data_adapter.py in _convert_numpy_and_scipy(x)
   1014       if issubclass(x.dtype.type, np.floating):
   1015         dtype = backend.floatx()
-> 1016       return ops.convert_to_tensor(x, dtype=dtype)
   1017     elif scipy_sparse and scipy_sparse.issparse(x):
   1018       return _scipy_sparse_to_sparse_tensor(x)

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
   1497 
   1498     if ret is None:
-> 1499       ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
   1500 
   1501     if ret is NotImplemented:

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/tensor_conversion_registry.py in _default_conversion_function(***failed resolving arguments***)
     50 def _default_conversion_function(value, dtype, name, as_ref):
     51   del as_ref  # Unused.
---> 52   return constant_op.constant(value, dtype, name=name)
     53 
     54 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
    262   """
    263   return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 264                         allow_broadcast=True)
    265 
    266 

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
    273       with trace.Trace("tf.constant"):
    274         return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
--> 275     return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
    276 
    277   g = ops.get_default_graph()

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
    298 def _constant_eager_impl(ctx, value, dtype, shape, verify_shape):
    299   """Implementation of eager constant."""
--> 300   t = convert_to_eager_tensor(value, ctx, dtype)
    301   if shape is None:
    302     return t

/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
     96       dtype = dtypes.as_dtype(dtype).as_datatype_enum
     97   ctx.ensure_initialized()
---> 98   return ops.EagerTensor(value, ctx.device_name, dtype)
     99 
    100 

ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type numpy.ndarray).

Any sort of help would be GREATLY appreciated. Thanks!

Comments:
  • Make sure the array that you are trying to convert has a numeric dtype. I suspect it is an object dtype containing other arrays (see the dtype check sketched below). Commented Nov 24, 2020 at 0:33
  • How do I do that using generators? Commented Nov 24, 2020 at 0:38
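
A quick way to act on the first comment is to pull one batch from the generator and inspect the labels directly. This is a minimal sketch using the train_generator defined above; an object dtype here is the red flag:

images, labels = next(train_generator)
print(images.shape)     # (BATCH_SIZE, 1024, 1024, 1) for the grayscale setup above
print(labels.dtype)     # prints "object" when each element is itself an ndarray
print(type(labels[0]))  # <class 'numpy.ndarray'> instead of a plain float row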

1 Answer

This is a bug in Keras, reported here: https://github.com/keras-team/keras/issues/13839

Basically, when class_mode="raw" and the labels are NumPy arrays, flow_from_dataframe yields the label batches as a 1-D object array of NumPy arrays rather than a proper 2-D array, which then makes the fit method fail.
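
To see why that shape trips up TensorFlow, here is a small standalone sketch (hypothetical values, not your actual generator output) that reproduces the conversion failure and shows that stacking fixes it:

import numpy as np
import tensorflow as tf

# A 1-D object array whose elements are float32 arrays -- the shape the
# label batches end up in.
labels = np.empty(4, dtype=object)
for i in range(4):
    labels[i] = np.array([0.1, 0.2, 0.3, 0.4], dtype="float32")

# tf.constant(labels) raises:
# ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type numpy.ndarray).

# Stacking produces a plain (4, 4) float32 array, which converts cleanly.
stacked = np.stack(labels)
print(tf.constant(stacked).shape)  # (4, 4)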

As a workaround until it's fixed, add these lines right after you create your generators:

train_generator._targets = np.stack(train_generator._targets)
val_generator._targets = np.stack(val_generator._targets)
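
If you would rather not touch the private _targets attribute, an alternative (just a sketch, assuming your generators yield (images, labels) tuples as configured above) is to wrap the training generator and stack each label batch on the fly:

import numpy as np

def stacked_labels(generator):
    # Re-yield every batch with the labels stacked into a proper 2-D float array.
    for images, labels in generator:
        yield images, np.stack(labels)

final_model.fit(stacked_labels(train_generator),
                steps_per_epoch=steps_per_epoch, epochs=3,
                validation_data=val_generator, validation_steps=val_steps,
                verbose=1)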