Making an AI security app using Django and React Native, but the output sent by Django does not show on Android and iOS, only on web

I am making a security app that uses a classification model to detect who is at my door (members of my family for now) and shows the result in an app I am building in React Native. I've got most of the code figured out, but the problem I am now facing is that when Django sends the feed (image by image, I'm presuming), the mobile apps (running in Expo Go) don't show the feed, while the web app does.

Views.py:

from django.shortcuts import render
from django.views.decorators import gzip
from django.http import StreamingHttpResponse, HttpResponseServerError
import cv2
import threading
import numpy as np
from tensorflow.keras.models import load_model

# Load the face recognition model
model = load_model('./final_face_recognition_model.keras')

# Define class labels
class_labels = ["person1", "person2", "person3"]

# Load OpenCV's pre-trained Haar Cascade for face detection
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Define a function to preprocess frames for the model
def preprocess_frame(frame, x, y, w, h):
    """
    Extract the face region, resize, normalize, and reshape it for the model.
    """
    face = frame[y:y+h, x:x+w]
    input_size = (160, 160)  # Model's input size
    resized_face = cv2.resize(face, input_size)
    normalized_face = resized_face / 255.0  # Normalize pixel values
    reshaped_face = np.expand_dims(normalized_face, axis=0)  # Add batch dimension
    return reshaped_face


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)
        (self.grabbed, self.frame) = self.video.read()
        threading.Thread(target=self.update, args=(), daemon=True).start()  # daemon thread so it does not block shutdown

    def __del__(self):
        self.video.release()

    def get_frame(self):
        image = self.frame
        if image is not None:
            # Convert the frame to grayscale for face detection
            gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            
            # Detect faces
            faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=5, minSize=(50, 50))
            
            for (x, y, w, h) in faces:
                # Draw a rectangle around the detected face
                cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)

                # Preprocess the face region for classification
                preprocessed_face = preprocess_frame(image, x, y, w, h)
                predictions = model.predict(preprocessed_face)  # Predict the face
                
                # Get the predicted class and confidence
                predicted_class = np.argmax(predictions, axis=1)[0]
                confidence = predictions[0][predicted_class]
                class_label = class_labels[predicted_class]

                # Annotate the rectangle with the predicted label and confidence
                cv2.putText(image, f"{class_label} ({confidence:.2f})", 
                            (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (255, 255, 255), 2)

            # Encode the frame for streaming (only when a frame was captured)
            _, jpeg = cv2.imencode('.jpg', image)
            return jpeg.tobytes()

        # No frame available yet; let the caller skip this iteration
        return None

    def update(self):
        while True:
            (self.grabbed, self.frame) = self.video.read()


def gen(camera):
    while True:
        frame = camera.get_frame()
        if frame is None:
            # Camera has not produced a frame yet; try again
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')


@gzip.gzip_page
def livefe(request):
    print("REQS RECEIVED BRUTHU")
    try:
        cam = VideoCamera()
        return StreamingHttpResponse(gen(cam), content_type="multipart/x-mixed-replace;boundary=frame")
    except Exception as e:
        print(f"Error: {e}")
        # Return an explicit error instead of falling through and returning None
        return HttpResponseServerError(f"Could not start the camera stream: {e}")


# Testing function
def say_hello(request): 
    print("HELLO SUP ")
    return render(request, 'hello.html', {"name": "User"})

MonitorScreen.jsx:

import React, { useState, useEffect, useRef } from 'react';
import { StyleSheet, Text, TouchableOpacity, View, Image } from 'react-native';
import { AntDesign } from '@expo/vector-icons';
import { useRouter } from 'expo-router';
import { useFocusEffect } from '@react-navigation/native';
import Video from 'expo-video';  // unused here; expo-video exports VideoView/useVideoPlayer rather than a default Video component

export default function MonitorScreen() {
  const router = useRouter();
  const [facing, setFacing] = useState('front');
  const [videoUrl, setVideoUrl] = useState('http://192.168.0.101:8000/playground/vid');
  const videoPlayer = useRef(null);
  function toggleCameraFacing() {
    setFacing((current) => (current === 'back' ? 'front' : 'back'));
    // Adjust the video URL if needed when switching cameras
  }



  return (
    <View style={styles.container}>
      {/* Use Image to display the video feed */}
      <View>
        <Image
          source={{ uri: videoUrl }}
          style={styles.camera}
        />
      </View>


      <View style={styles.bottomNav}>
        <TouchableOpacity
          style={styles.navItem}
          onPress={() => router.push('StudentListScreen')}
        >
          <AntDesign name="home" size={24} color="#FFF" />
          <Text style={styles.navText}>Home</Text>
        </TouchableOpacity>

        <TouchableOpacity
          style={styles.navItem}
          onPress={() => router.push('MonitorScreen')}
        >
          <AntDesign name="camera" size={24} color="#FFF" />
          <Text style={styles.navText}>Cams</Text>
        </TouchableOpacity>

        <TouchableOpacity
          style={styles.navItem}
          onPress={() => router.push('AccountScreen')}
        >
          <AntDesign name="user" size={24} color="#FFF" />
          <Text style={styles.navText}>Account</Text>
        </TouchableOpacity>
      </View>
    </View>
  );
}

const styles = StyleSheet.create({
  container: {
    flex: 1,
    justifyContent: 'center',
  },
  camera: {
    flex: 1,
    justifyContent: 'center',
    alignItems: 'center',
    padding: 20,
    width: '100%', // Full screen width
    height: '100%', // Full screen height
  },
  buttonContainer: {
    flex: 1,
    flexDirection: 'row',
    backgroundColor: 'transparent',
    margin: 64,
  },
  button: {
    flex: 1,
    alignSelf: 'flex-end',
    alignItems: 'center',
    marginBottom: 40,
  },
  text: {
    fontSize: 24,
    fontWeight: 'bold',
    color: 'white',
  },
  bottomNav: {
    flexDirection: 'row',
    justifyContent: 'space-around',
    alignItems: 'center',
    backgroundColor: '#293241',
    paddingVertical: 10,
    position: 'absolute',
    bottom: 0,
    left: 0,
    right: 0,
  },
  navItem: {
    alignItems: 'center',
  },
  navText: {
    color: '#FFF',
    fontSize: 12,
    marginTop: 5,
  },
});

I've tried using the Video component from expo-video, but I think that one is for pre-recorded video files rather than for a live stream. For now I am running a local server and the requests are made by devices on the same network. I've seen both mobile devices make successful API calls (the calls do start the camera), but nothing is displayed on either device except the nav menu at the bottom. The rest of the app works fine; it's just this one page that is giving me a headache.
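
One workaround I am considering is rendering the stream inside a WebView, since my guess is that the native Image component on Android/iOS does not decode a multipart/x-mixed-replace (MJPEG) stream the way a browser's img tag does, which would explain why only the web build shows the feed. Below is a minimal sketch, assuming react-native-webview is installed (npx expo install react-native-webview) and reusing the stream URL from above:

// Sketch: render the MJPEG stream inside a WebView instead of <Image>
import React from 'react';
import { View, StyleSheet } from 'react-native';
import { WebView } from 'react-native-webview';

const STREAM_URL = 'http://192.168.0.101:8000/playground/vid'; // same Django endpoint as above

export default function StreamView() {
  // A tiny HTML page whose <img> points at the Django stream; the WebView's
  // browser engine handles the multipart/x-mixed-replace decoding.
  const html = `
    <html>
      <body style="margin:0;background:#000;">
        <img src="${STREAM_URL}" style="width:100%;height:100%;object-fit:contain;" />
      </body>
    </html>`;

  return (
    <View style={styles.container}>
      <WebView originWhitelist={['*']} source={{ html }} style={styles.webview} />
    </View>
  );
}

const styles = StyleSheet.create({
  container: { flex: 1 },
  webview: { flex: 1 },
});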

Maybe using WebSockets would be a better approach for transmitting this kind of data.
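
If I went that route, the client side might look like the sketch below. It assumes a hypothetical Django Channels consumer at ws://192.168.0.101:8000/ws/stream/ that sends each frame as a base64-encoded JPEG string (that endpoint and encoding are my assumptions, not something in the code above); React Native's Image does accept base64 data URIs, so each incoming frame could simply replace the previous one:

// Sketch of the receiving side, assuming a hypothetical Channels endpoint
// at ws://192.168.0.101:8000/ws/stream/ that sends base64-encoded JPEG frames.
import React, { useEffect, useState } from 'react';
import { Image, StyleSheet } from 'react-native';

export default function WsStream() {
  const [frame, setFrame] = useState(null);

  useEffect(() => {
    const ws = new WebSocket('ws://192.168.0.101:8000/ws/stream/');
    // Each message is one base64 JPEG; storing it in state re-renders <Image>.
    ws.onmessage = (event) => setFrame(event.data);
    return () => ws.close();
  }, []);

  if (!frame) return null;
  return (
    <Image
      source={{ uri: `data:image/jpeg;base64,${frame}` }}
      style={styles.frame}
    />
  );
}

const styles = StyleSheet.create({
  frame: { flex: 1, width: '100%', height: '100%' },
});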
