How to use numpy.float32


This article explains how to use numpy.float32 through a series of practical code examples. The patterns involved are simple, quick to apply, and widely useful: allocating float32 arrays, casting with astype, and wrapping scalar values as np.float32. Let's work through the examples.
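
Before the longer examples, a minimal sketch of the recurring np.float32 operations may help (the variable names here are purely illustrative): wrapping a Python number as a float32 scalar, allocating a float32 array directly, and casting an existing array with astype.

import numpy as np

x = np.float32(0.5)                           # a float32 scalar
arr = np.zeros((2, 3), dtype=np.float32)      # allocate a float32 array directly
arr64 = np.arange(6, dtype=np.float64).reshape(2, 3)
arr32 = arr64.astype(np.float32, copy=False)  # cast an existing array to float32
print(x.dtype, arr.dtype, arr32.dtype)        # float32 float32 float32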

Example 1: draw_image

import numpy as np
from numpy import float32

def draw_image(self, img, color=[0, 255, 0], alpha=1.0, copy=True, from_img=None):
    if copy:
        img = np.copy(img)
    orig_dtype = img.dtype
    # Alpha blending needs floating-point precision, so work in float32 ...
    if alpha != 1.0 and img.dtype != np.float32:
        img = img.astype(np.float32, copy=False)
    for rect in self:
        if from_img is not None:
            rect.resize(from_img, img).draw_on_image(img, color=color, alpha=alpha, copy=False)
        else:
            rect.draw_on_image(img, color=color, alpha=alpha, copy=False)
    # ... then cast back to the original dtype before returning.
    if orig_dtype != img.dtype:
        img = img.astype(orig_dtype, copy=False)
    return img

Example 2: generate_moving_mnist

import random

import numpy as np
from numpy import float32

def generate_moving_mnist(self, num_digits=2):
    '''
    Get random trajectories for the digits and generate a video.
    '''
    # Frames are accumulated in a float32 buffer.
    data = np.zeros((self.n_frames_total, self.image_size_, self.image_size_), dtype=np.float32)
    for n in range(num_digits):
        # Trajectory
        start_y, start_x = self.get_random_trajectory(self.n_frames_total)
        ind = random.randint(0, self.mnist.shape[0] - 1)
        digit_image = self.mnist[ind]
        for i in range(self.n_frames_total):
            top = start_y[i]
            left = start_x[i]
            bottom = top + self.digit_size_
            right = left + self.digit_size_
            # Draw digit
            data[i, top:bottom, left:right] = np.maximum(data[i, top:bottom, left:right], digit_image)
    data = data[..., np.newaxis]
    return data

Example 3: wav_format

import numpy as np
from numpy import float32

def wav_format(self, input_wave_file, output_wave_file, target_phrase):
    # load_wav, setup_graph and toks are provided elsewhere in the original project.
    pop_size = 100
    elite_size = 10
    mutation_p = 0.005
    noise_stdev = 40
    noise_threshold = 1
    mu = 0.9
    alpha = 0.001
    max_iters = 3000
    num_points_estimate = 100
    delta_for_gradient = 100
    delta_for_perturbation = 1e3
    # Read the input audio as float32 and replicate it once per population member.
    input_audio = load_wav(input_wave_file).astype(np.float32)
    pop = np.expand_dims(input_audio, axis=0)
    pop = np.tile(pop, (pop_size, 1))
    output_wave_file = output_wave_file
    target_phrase = target_phrase
    funcs = setup_graph(pop, np.array([toks.index(x) for x in target_phrase]))

Example 4: get_rois_blob

import numpy as np
from numpy import float32

def get_rois_blob(im_rois, im_scale_factors):
    """Converts RoIs into network inputs.
    Arguments:
        im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
        im_scale_factors (list): scale factors as returned by _get_image_blob
    Returns:
        blob (ndarray): R x 5 matrix of RoIs in the image pyramid
    """
    rois_blob_real = []
    for i in range(len(im_scale_factors)):
        rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
        rois_blob = np.hstack((levels, rois))
        rois_blob_real.append(rois_blob.astype(np.float32, copy=False))
    return rois_blob_real

Example 5: generate_anchors_pre

import numpy as np
from numpy import float32

def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):
    """ A wrapper function to generate anchors given different scales
    Also return the number of anchors in variable 'length'
    """
    anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
    A = anchors.shape[0]
    shift_x = np.arange(0, width) * feat_stride
    shift_y = np.arange(0, height) * feat_stride
    shift_x, shift_y = np.meshgrid(shift_x, shift_y)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
    K = shifts.shape[0]
    # width changes faster, so here it is H, W, C
    anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
    anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
    length = np.int32(anchors.shape[0])
    return anchors, length

Example 6: draw_heatmap

import imgaug as ia
import matplotlib.pyplot as plt
import numpy as np
from numpy import float32

def draw_heatmap(img, heatmap, alpha=0.5):
    """Draw a heatmap overlay over an image."""
    assert (len(heatmap.shape) == 2
            or (len(heatmap.shape) == 3 and heatmap.shape[2] == 1))
    assert img.dtype in [np.uint8, np.int32, np.int64]
    assert heatmap.dtype in [np.float32, np.float64]
    if img.shape[0:2] != heatmap.shape[0:2]:
        heatmap_rs = np.clip(heatmap * 255, 0, 255).astype(np.uint8)
        heatmap_rs = ia.imresize_single_image(
            heatmap_rs[..., np.newaxis],
            img.shape[0:2],
            interpolation="nearest"
        )
        heatmap = np.squeeze(heatmap_rs) / 255.0
    cmap = plt.get_cmap('jet')
    heatmap_cmapped = cmap(heatmap)
    heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
    heatmap_cmapped = heatmap_cmapped * 255
    mix = (1 - alpha) * img + alpha * heatmap_cmapped
    mix = np.clip(mix, 0, 255).astype(np.uint8)
    return mix

Example 7: maybe_cast_to_float64

import logging

import numpy as np
from numpy import float32

def maybe_cast_to_float64(da):
    """Cast DataArrays to np.float64 if they are of type np.float32.

    Parameters
    ----------
    da : xr.DataArray
        Input DataArray

    Returns
    -------
    DataArray
    """
    if da.dtype == np.float32:
        logging.warning('Datapoints were stored using the np.float32 datatype.'
                        'For accurate reduction operations using bottleneck, '
                        'datapoints are being cast to the np.float64 datatype.'
                        ' For more information see: https://github.com/pydata/'
                        'xarray/issues/1346')
        return da.astype(np.float64)
    else:
        return da
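
As a quick sanity check of this helper, the hypothetical snippet below (assuming xarray is installed and maybe_cast_to_float64 is defined as above) builds a float32 DataArray and confirms it comes back as float64:

import numpy as np
import xarray as xr

da32 = xr.DataArray(np.arange(4, dtype=np.float32))
da64 = maybe_cast_to_float64(da32)
print(da32.dtype, da64.dtype)  # float32 float64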

Example 8: in_top_k

import numpy as np
from numpy import float32

def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`
    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.
    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within top-k values of predictions_i
    '''
    # `theano`, `T` (theano.tensor), `any` and `equal` are the Theano backend
    # helpers defined in the surrounding module.
    predictions_top_k = T.argsort(predictions)[:, -k:]
    result, _ = theano.map(lambda prediction, target: any(equal(prediction, target)),
                           sequences=[predictions_top_k, targets])
    return result

Example 9: ctc_path_probs

import numpy as np
from numpy import float32

def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros
    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])
    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask

Example 10: rmsprop

import numpy as np
from numpy import float32

def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6, consider_constant=None):
    """
    RMSProp.
    """
    lr = theano.shared(np.float32(lr).astype(floatX))
    gradients = self.get_gradients(cost, params, consider_constant)
    accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32)) for p in params]
    updates = []
    for param, gradient, accumulator in zip(params, gradients, accumulators):
        new_accumulator = rho * accumulator + (1 - rho) * gradient ** 2
        updates.append((accumulator, new_accumulator))
        new_param = param - lr * gradient / T.sqrt(new_accumulator + eps)
        updates.append((param, new_param))
    return updates

Example 11: adadelta

import numpy as np
from numpy import float32

def adadelta(self, cost, params, rho=0.95, epsilon=1e-6, consider_constant=None):
    """
    Adadelta. Based on:
    http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
    """
    rho = theano.shared(np.float32(rho).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))
    gradients = self.get_gradients(cost, params, consider_constant)
    accu_gradients = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
    accu_deltas = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
    updates = []
    for param, gradient, accu_gradient, accu_delta in zip(params, gradients, accu_gradients, accu_deltas):
        new_accu_gradient = rho * accu_gradient + (1. - rho) * gradient ** 2.
        delta_x = - T.sqrt((accu_delta + epsilon) / (new_accu_gradient + epsilon)) * gradient
        new_accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
        updates.append((accu_gradient, new_accu_gradient))
        updates.append((accu_delta, new_accu_delta))
        updates.append((param, param + delta_x))
    return updates

Example 12: adagrad

import numpy as np
from numpy import float32

def adagrad(self, cost, params, lr=1.0, epsilon=1e-6, consider_constant=None):
    """
    Adagrad. Based on http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
    """
    lr = theano.shared(np.float32(lr).astype(floatX))
    epsilon = theano.shared(np.float32(epsilon).astype(floatX))
    gradients = self.get_gradients(cost, params, consider_constant)
    gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
    updates = []
    for param, gradient, gsum in zip(params, gradients, gsums):
        new_gsum = gsum + gradient ** 2.
        updates.append((gsum, new_gsum))
        updates.append((param, param - lr * gradient / (T.sqrt(gsum + epsilon))))
    return updates

Example 13: sgd

import numpy as np
from numpy import float32

def sgd(self, cost, params, constraints={}, lr=0.01):
    """
    Stochastic gradient descent.
    """
    updates = []
    lr = theano.shared(np.float32(lr).astype(floatX))
    gradients = self.get_gradients(cost, params)
    for p, g in zip(params, gradients):
        v = -lr * g
        new_p = p + v
        # apply constraints
        if p in constraints:
            c = constraints[p]
            new_p = c(new_p)
        updates.append((p, new_p))
    return updates

Example 14: sgdmomentum

import numpy as np
from numpy import float32

def sgdmomentum(self, cost, params, constraints={}, lr=0.01, consider_constant=None, momentum=0.):
    """
    Stochastic gradient descent with momentum. Momentum has to be in [0, 1).
    """
    # Check that the momentum is a correct value
    assert 0 <= momentum < 1
    lr = theano.shared(np.float32(lr).astype(floatX))
    momentum = theano.shared(np.float32(momentum).astype(floatX))
    gradients = self.get_gradients(cost, params)
    velocities = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
    updates = []
    for param, gradient, velocity in zip(params, gradients, velocities):
        new_velocity = momentum * velocity - lr * gradient
        updates.append((velocity, new_velocity))
        new_p = param + new_velocity
        # apply constraints
        if param in constraints:
            c = constraints[param]
            new_p = c(new_p)
        updates.append((param, new_p))
    return updates

Example 15: set_values

import numpy as np
from numpy import float32

def set_values(name, param, pretrained):
    """
    Initialize a network parameter with pretrained values.
    We check that sizes are compatible.
    """
    param_value = param.get_value()
    if pretrained.size != param_value.size:
        raise Exception(
            "Size mismatch for parameter %s. Expected %i, found %i."
            % (name, param_value.size, pretrained.size)
        )
    param.set_value(np.reshape(
        pretrained, param_value.shape
    ).astype(np.float32))

That concludes this detailed look at how to use numpy.float32. For more material on the topic, see the other related articles on 九品源码!