Keras model visualization:
The model:

import numpy as np
import cv2
import matplotlib.pyplot as plt
from keras import backend as K
from keras.models import Sequential
from keras.layers import (ZeroPadding2D, Conv2D, BatchNormalization, MaxPooling2D,
                          Dropout, AveragePooling2D, Flatten, Dense)

model = Sequential()
# Input: 38x38 single-channel images -> (38, 38, 1) tensors.
model.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
# Three Conv-BN-MaxPool-Dropout blocks with 32, 64 and 128 filters of size 3x3.
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(AveragePooling2D((5, 5)))
model.add(Flatten())
# model.add(Dense(512, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(label_size, activation='softmax'))  # label_size: number of output classes, defined elsewhere
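Before probing individual layers, the architecture itself can be inspected. A minimal sketch, assuming the pydot and graphviz packages are installed (the output file name model.png is arbitrary):

from keras.utils import plot_model

model.summary()                                            # text summary: layers, output shapes, parameter counts
plot_model(model, to_file='model.png', show_shapes=True)   # architecture diagram with tensor shapes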
1. Layer visualization:
test_x = []
img_src = cv2.imdecode(np.fromfile(r'c:\temp.tif', dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img_src, (38, 38), interpolation=cv2.INTER_CUBIC)
# img = np.random.randint(0, 255, (38, 38))
img = (255 - img) / 255
img = np.reshape(img, (38, 38, 1))
test_x.append(img)

###################################################################
# First Conv2D layer: copy its trained weights into a one-conv sub-model.
layer = model.layers[1]
weight = layer.get_weights()
# print(weight)
print([w.shape for w in weight])  # kernel (3, 3, 1, 32) and bias (32,)

model_v1 = Sequential()
model_v1.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v1.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v1.layers[1].set_weights(weight)

re = model_v1.predict(np.array(test_x))
print(np.shape(re))
re = np.transpose(re, (0, 3, 1, 2))
for i in range(32):
  plt.subplot(4, 8, i + 1)
  plt.imshow(re[0][i])  # cmap='gray'
plt.show()

##################################################################
# Second Conv2D layer: rebuild the network up to Conv2D(64) and copy the trained
# conv weights (the BatchNormalization layers keep their initial weights).
model_v2 = Sequential()
model_v2.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v2.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v2.add(BatchNormalization())
model_v2.add(MaxPooling2D(pool_size=(2, 2)))
model_v2.add(Dropout(0.25))
model_v2.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
print(len(model_v2.layers))

layer1 = model.layers[1]
weight1 = layer1.get_weights()
model_v2.layers[1].set_weights(weight1)
layer5 = model.layers[5]
weight5 = layer5.get_weights()
model_v2.layers[5].set_weights(weight5)

re2 = model_v2.predict(np.array(test_x))
re2 = np.transpose(re2, (0, 3, 1, 2))
for i in range(64):
  plt.subplot(8, 8, i + 1)
  plt.imshow(re2[0][i])  # cmap='gray'
plt.show()

##################################################################
# Third Conv2D layer: rebuild up to Conv2D(128) and copy the three conv weights.
model_v3 = Sequential()
model_v3.add(ZeroPadding2D((1, 1), input_shape=(38, 38, 1)))
model_v3.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model_v3.add(BatchNormalization())
model_v3.add(MaxPooling2D(pool_size=(2, 2)))
model_v3.add(Dropout(0.25))
model_v3.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model_v3.add(BatchNormalization())
model_v3.add(MaxPooling2D(pool_size=(2, 2)))
model_v3.add(Dropout(0.25))
model_v3.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
print(len(model_v3.layers))

layer1 = model.layers[1]
weight1 = layer1.get_weights()
model_v3.layers[1].set_weights(weight1)
layer5 = model.layers[5]
weight5 = layer5.get_weights()
model_v3.layers[5].set_weights(weight5)
layer9 = model.layers[9]
weight9 = layer9.get_weights()
model_v3.layers[9].set_weights(weight9)

re3 = model_v3.predict(np.array(test_x))
re3 = np.transpose(re3, (0, 3, 1, 2))
for i in range(121):  # only the first 121 of the 128 feature maps fit the 11x11 grid
  plt.subplot(11, 11, i + 1)
  plt.imshow(re3[0][i])  # cmap='gray'
plt.show()
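Rebuilding truncated Sequential models and copying weights layer by layer works, but the same feature maps can usually be read out directly with the functional API, without any manual weight copying. A minimal sketch for the first convolution layer, assuming the trained model and test_x from above:

from keras.models import Model

act_model = Model(inputs=model.input, outputs=model.layers[1].output)  # taps the trained Conv2D(32) output
acts = act_model.predict(np.array(test_x))                             # shape (1, 40, 40, 32) after ZeroPadding2D
acts = np.transpose(acts, (0, 3, 1, 2))
for i in range(32):
  plt.subplot(4, 8, i + 1)
  plt.imshow(acts[0][i])
plt.show()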
2. Kernel visualization:
# Clip the input image to the [0, 1] range (used only by the commented-out lines below).
def process(x):
  res = np.clip(x, 0, 1)
  return res
# Gradient mask matching process(): 1 where x lies inside [0, 1], 0 where the clip saturates.
def dprocessed(x):
  res = np.zeros_like(x)
  res += 1
  res[x < 0] = 0
  res[x > 1] = 0
  return res
# Normalize a gradient-ascent result into a displayable uint8 image
# (zero-center, scale to std 0.1, shift to 0.5, then map to [0, 255]).
def deprocess_image(x):
  x -= x.mean()
  x /= (x.std() + 1e-5)
  x *= 0.1
  x += 0.5
  x = np.clip(x, 0, 1)
  x *= 255
  x = np.clip(x, 0, 255).astype('uint8')
  return x
Each kernel is visualized by gradient ascent on the input: starting from random noise, the input image is repeatedly nudged in the direction that increases the mean activation of one filter of model.layers[5] (the 64-filter convolution).

for i_kernal in range(64):
  input_img=model.input
  loss = K.mean(model.layers[5].output[:, :,:,i_kernal])
  # loss = K.mean(model.output[:, i_kernal])
  # compute the gradient of the input picture wrt this loss
  grads = K.gradients(loss, input_img)[0]
  # normalization trick: we normalize the gradient
  grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
  # this function returns the loss and grads given the input picture
  iterate = K.function([input_img, K.learning_phase()], [loss, grads])
  # we start from a gray image with some noise
  np.random.seed(0)
  num_channels=1
  img_height=img_width=38
  input_img_data = (255- np.random.randint(0,255,(1, img_height, img_width, num_channels))) / 255.
  failed = False
  # run gradient ascent
  print('####################################',i_kernal+1)
  loss_value_pre=0
  for i in range(10000):
    # processed = process(input_img_data)
    # predictions = model.predict(input_img_data)
    loss_value, grads_value = iterate([input_img_data, 1])  # 1 = training-mode learning phase
    # grads_value *= dprocessed(input_img_data[0])
    if i%1000 == 0:
      # print(' predictions: ' , np.shape(predictions), np.argmax(predictions))
      print('Iteration %d/%d, loss: %f' % (i, 10000, loss_value))
      print('Mean grad: %f' % np.mean(grads_value))
      if all(np.abs(grads_val) < 0.000001 for grads_val in grads_value.flatten()):
        failed = True
        print('Failed')
        break
      # print('Image:\n%s' % str(input_img_data[0,0,:,:]))
      # stop once the loss falls below the first value recorded for this kernel
      if loss_value_pre != 0 and loss_value_pre > loss_value:
        break
      if loss_value_pre == 0:
        loss_value_pre = loss_value
      # if loss_value > 0.99:
      #   break
    input_img_data += grads_value * 1  # gradient-ascent step (step size 1; originally 1e-3)
  plt.subplot(8, 8, i_kernal+1)
  # plt.imshow((process(input_img_data[0,:,:,0])*255).astype('uint8'), cmap='Greys') #cmap='Greys'
  img_re = deprocess_image(input_img_data[0])
  img_re = np.reshape(img_re, (38,38))
  plt.imshow(img_re, cmap='Greys') #cmap='Greys'
  # plt.show()
plt.show()
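The loop above uses K.gradients and K.function, which rely on the TensorFlow 1.x graph mode behind Keras. With TensorFlow 2 the same gradient ascent can be written with GradientTape; a rough sketch under that assumption (the model must be built with tf.keras, i_kernal is a filter index of model.layers[5], and 200 steps is an arbitrary choice):

import tensorflow as tf

feature_model = tf.keras.Model(model.input, model.layers[5].output)   # taps the 64-filter conv layer
img = tf.Variable(np.random.uniform(0, 1, (1, 38, 38, 1)), dtype=tf.float32)
for _ in range(200):
  with tf.GradientTape() as tape:
    loss = tf.reduce_mean(feature_model(img)[:, :, :, i_kernal])      # mean activation of one filter
  grads = tape.gradient(loss, img)
  grads /= (tf.sqrt(tf.reduce_mean(tf.square(grads))) + 1e-5)         # normalize the gradient
  img.assign_add(grads)                                               # gradient-ascent step
plt.imshow(deprocess_image(img.numpy()[0]).reshape(38, 38), cmap='Greys')
plt.show()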
For reference, the trained convolution layers used above sit at model.layers[1], model.layers[5] and model.layers[9]; model.layers[-1] is the final softmax Dense layer.
That is the complete example of Keras model visualization, layer visualization, and kernel visualization that I wanted to share. I hope it gives everyone a useful reference, and I hope for your continued support.