本文共 5750 字,大约阅读时间需要 19 分钟。
这是一个项目中的功能之一,调试了两小时,终于能够
javascript设置开始计和暂停计时 监控人脸 记录时间了 效果图: 离开页面之后回到页面会从0计时(不是关闭页面,而是页面失去焦点) 离开摄像头时会弹出提示。 离开摄像头反馈给后端的时间。全部代码:
开始听课 结束听课 暂时离开 您上课的时间:
其中包含了图片解码编码,打开摄像头,获取后端结果。
服务器有两个,一个是python的pytorch深度学习处理图片 在flask框架下,一个是java的springboot来获得离开摄像头的时间。 java部分: 实体类:package com.naughty.userlogin02.bean;import lombok.Data;@Datapublic class Nowtime { int id; String timenot; String total; String nowtime; String begintime;}
跨域请求:
@RestControllerpublic class Timecontroller { @Autowired TimeDao timeDao; static int id = 0; @CrossOrigin @PostMapping("/gettime") public String getteacherList(@RequestBody String time){ id++; System.out.println(time); // System.out.println(nowtime.getId()); MapjsonMap = JSON.parseObject(time); System.out.println(jsonMap.get("total")); LocalDate date = LocalDate.now(); System.out.println(date); Nowtime nowtime = new Nowtime(); nowtime.setNowtime(date.toString()); String ns=jsonMap.get("timenot").toString(); String totaltime=jsonMap.get("total").toString(); String begintime = jsonMap.get("begintime").toString(); nowtime.setTimenot(ns); nowtime.setTotal(totaltime); nowtime.setId(id); nowtime.setBegintime(begintime); timeDao.addtime(nowtime); return "ok"; //return timenot; } @GetMapping("/gettime") public String getalltime(){ System.out.println("time!"); List nowtimes = timeDao.getall(); HashMap res = new HashMap<>(); res.put("data",nowtimes); String users_json = JSON.toJSONString(res); return users_json; }}
xml语句:
insert into data1.gettime(timenot,total,nowtime,begintime) values (#{timenot},#{total},#{nowtime},#{begintime});
python的主程序:
import base64
from predict import class_names
import torch
from torchvision import datasets, models, transforms
import cv2
import numpy as np
import requests
from flask import Flask, make_response, jsonify
import flask
from flask_cors import CORS
import socket
import threading
import json
import os
from io import BytesIO
from multiprocessing import Process
import io
from PIL import Image

# Global Flask app; CORS is open for every route so the JS front end served
# from another origin can POST camera frames here.
app = Flask(__name__)
CORS(app, resources=r'/*', supports_credentials=True)

basedir = os.path.abspath(os.path.dirname(__file__))

# ImageNet mean/std preprocessing — presumably matching what the checkpoint
# was fine-tuned with; confirm against the training script.
transform = transforms.Compose([
    transforms.Resize(224),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

# BUG FIX: the model used to be torch.load()-ed inside every /getpic request,
# re-reading the whole checkpoint from disk per frame. Load lazily once and
# cache it at module level instead.
_model = None


def _get_model():
    """Load the classifier checkpoint on first use and cache it."""
    global _model
    if _model is None:
        _model = torch.load('modefresnet.pkl')
        _model.eval()
    return _model


def run_index():
    """Start the web server (threaded) and accept all HTTP requests."""
    app.run(host='0.0.0.0', port=5000, threaded=True)


def make_new_response(data):
    """Wrap *data* in a {'code': 0, 'data': ...} envelope with open CORS headers."""
    res = make_response(jsonify({'code': 0, 'data': data}))
    res.headers['Access-Control-Allow-Origin'] = '*'
    res.headers['Access-Control-Allow-Method'] = '*'
    res.headers['Access-Control-Allow-Headers'] = '*'
    return res


def decode_base64(data):
    """Decode base64, padding being optional.

    :param data: Base64 data as an ASCII byte string
    :returns: The decoded byte string.
    """
    missing_padding = len(data) % 4
    if missing_padding != 0:
        # b64decode requires length % 4 == 0; restore any stripped '=' pads.
        data += b'=' * (4 - missing_padding)
    return base64.b64decode(data)


@app.route("/test")
def test():
    """Liveness probe returning a fixed payload."""
    res = "{'no':'dddd'}"
    return make_new_response(res)


@app.route('/getpic', methods=['POST'])
def getpic():
    """Classify one camera frame POSTed as JSON {'data': <base64 jpeg>}.

    Returns the predicted class name as plain text.
    """
    data = json.loads(flask.request.get_data("data"))
    data_64 = str.encode(data['data'])
    imgdata = decode_base64(data_64)
    # Keep writing the frame to 1.jpg (handy for debugging), but use a
    # context manager so the handle is closed even if write() raises.
    with open('1.jpg', 'wb') as f:
        f.write(imgdata)
    image = Image.open(r"1.jpg").convert('RGB')
    image = transform(image).unsqueeze(0)
    modelme = _get_model()
    # Inference only: no_grad skips autograd bookkeeping.
    with torch.no_grad():
        outputs = modelme(image)
        _, predict = torch.max(outputs.data, 1)
    # Batch size is always 1 here, so report the single prediction.
    return class_names[predict[0]]


if __name__ == "__main__":
    app.run(debug=True)
深度学习的处理图片的网络模型就不贴了,需要的可以留言
用的是Resnet残差网络。识别速度还是很快的,判断的正确率也比较高。(训练的数据集很少,只有六百多张)
下例为从指定的层提取ResNet50的特征。import torch
from torch import nn
import torchvision.models as models
import torchvision.transforms as transforms
import cv2


class FeatureExtractor(nn.Module):
    """Run *submodule*'s top-level children in registration order and collect
    the outputs of the layers whose names appear in *extracted_layers*.

    forward() returns a list of tensors, one per extracted layer.
    """

    def __init__(self, submodule, extracted_layers):
        # BUG FIX: the pasted snippet lost the dunder underscores — it defined
        # `init` (and called `super().init()`) instead of `__init__`, so
        # nn.Module was never initialised. Restored the real constructor.
        super(FeatureExtractor, self).__init__()
        self.submodule = submodule
        self.extracted_layers = extracted_layers

    def forward(self, x):
        outputs = []
        for name, module in self.submodule._modules.items():
            # BUG FIX: original used `name is "fc"`, a string *identity*
            # comparison that only works by interning accident; use == for
            # value equality. Flatten before the fully-connected layer.
            if name == "fc":
                x = x.view(x.size(0), -1)
            x = module(x)
            if name in self.extracted_layers:
                outputs.append(x)
        return outputs
# Demo: extract the layer3 feature map of a pretrained ResNet-50.
model = models.resnet50(pretrained=True)  # load the ResNet-50 backbone
# BUG FIX: the original called .cuda() unconditionally and crashed on
# CPU-only machines; select the device at runtime instead.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()

# BUG FIX: the pasted snippet used curly "smart quotes" around 'test.jpg' and
# 'layer3', which are a SyntaxError in Python; replaced with straight quotes.
img = cv2.imread('test.jpg')  # load the image (BGR, HWC, uint8)
img = cv2.resize(img, (224, 224))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
img = transform(img).to(device)
img = img.unsqueeze(0)  # add the batch dimension -> (1, 3, 224, 224)

model2 = FeatureExtractor(model, ['layer3'])  # extract the layer3 features
with torch.no_grad():
    out = model2(img)
    print(len(out), out[0].shape)
# Source: http://lvten.baihongyu.com/