Announcement

Collapse
No announcement yet.

How to use the camera as a sensor

Collapse
X
 
  • Filter
  • Time
  • Show
Clear All
new posts

  • How to use the camera as a sensor

    Below are the instructions for using the phone's built-in camera for sensing purposes

    First in the FtcRobotControllerActivity file add

    Code:
    import android.hardware.Camera;
    Then add
    Code:
    public Camera camera;
    /**
     * Opens the first front-facing camera found on the device.
     *
     * @return an open {@link Camera} instance, or {@code null} if the device
     *         has no front-facing camera or the camera could not be opened
     *         (e.g. it is already in use by another app)
     */
    private Camera openFrontFacingCamera() {
        int cameraId = -1;
        int numberOfCameras = Camera.getNumberOfCameras();
        for (int i = 0; i < numberOfCameras; i++) {
          Camera.CameraInfo info = new Camera.CameraInfo();
          Camera.getCameraInfo(i, info);
          if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
            cameraId = i;
            break;
          }
        }
        if (cameraId == -1) {
          // No front-facing camera on this device; Camera.open(-1) would throw.
          return null;
        }
        try {
          return Camera.open(cameraId);
        } catch (Exception ignored) {
          // Best-effort: the camera may be held by another process or disabled
          // by device policy. Callers must handle a null return.
          return null;
        }
      }
    
    /**
     * Builds a {@link CameraPreview} for the given camera on the UI thread,
     * stores it on the op mode, and attaches it to the preview FrameLayout.
     */
    public void initPreview(final Camera camera, final CameraOp context, final Camera.PreviewCallback previewCallback) {
        Runnable attachPreview = new Runnable() {
          @Override
          public void run() {
            FrameLayout container = (FrameLayout) findViewById(R.id.previewLayout);
            context.preview = new CameraPreview(FtcRobotControllerActivity.this, camera, previewCallback);
            container.addView(context.preview);
          }
        };
        runOnUiThread(attachPreview);
      }
    to the class

    Now add an opmode that has this in it

    Code:
    package com.qualcomm.ftcrobotcontroller.opmodes;
    
    import com.qualcomm.ftcrobotcontroller.CameraPreview;
    import com.qualcomm.ftcrobotcontroller.FtcRobotControllerActivity;
    import com.qualcomm.robotcore.eventloop.opmode.OpMode;
    import java.io.ByteArrayOutputStream;
    
    import android.graphics.Bitmap;
    import android.graphics.BitmapFactory;
    import android.graphics.ImageFormat;
    import android.graphics.Rect;
    import android.graphics.YuvImage;
    import android.hardware.Camera;
    import android.util.Log;
    
    /**
     * TeleOp Mode
     * <p>
     *Enables control of the robot via the gamepad
     */
    /**
     * TeleOp mode that grabs frames from the phone's camera and reports, via
     * telemetry, which color channel (red, green or blue) dominates the image.
     */
    public class CameraOp extends OpMode {
      private Camera camera;
      public CameraPreview preview;
      public Bitmap image;
      private int width;
      private int height;
      // Latest preview frame. Written on the camera callback thread and read
      // by loop(); all access goes through frameLock to avoid a torn read
      // while a conversion is in progress.
      private YuvImage yuvImage = null;
      private int looped = 0;
      private String data;
      // Guards yuvImage, width and height, which are shared between the
      // camera callback thread and the op-mode loop() thread.
      private final Object frameLock = new Object();

      /** Extracts the red component (0-255) of an ARGB pixel. */
      private int red(int pixel) {
        return (pixel >> 16) & 0xff;
      }

      /** Extracts the green component (0-255) of an ARGB pixel. */
      private int green(int pixel) {
        return (pixel >> 8) & 0xff;
      }

      /** Extracts the blue component (0-255) of an ARGB pixel. */
      private int blue(int pixel) {
        return pixel & 0xff;
      }

      // Invoked on the camera's thread for every preview frame; stashes the
      // raw NV21 frame and its dimensions for loop() to consume.
      private Camera.PreviewCallback previewCallback = new Camera.PreviewCallback() {
        public void onPreviewFrame(byte[] data, Camera camera)
        {
          Camera.Parameters parameters = camera.getParameters();
          synchronized (frameLock) {
            width = parameters.getPreviewSize().width;
            height = parameters.getPreviewSize().height;
            yuvImage = new YuvImage(data, ImageFormat.NV21, width, height, null);
          }
          looped += 1;
        }
      };

      /**
       * Converts the most recent NV21 preview frame into an RGB {@link Bitmap}
       * stored in {@code image}. Does nothing if no frame has arrived yet or
       * decoding fails; {@code image} keeps its previous value in that case.
       */
      private void convertImage() {
        YuvImage frame;
        int w;
        int h;
        synchronized (frameLock) {
          frame = yuvImage;
          w = width;
          h = height;
        }
        if (frame == null) {
          return;
        }
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // Quality 0 = maximum JPEG compression: fastest, and sufficient for
        // deciding which color channel dominates.
        frame.compressToJpeg(new Rect(0, 0, w, h), 0, out);
        byte[] imageBytes = out.toByteArray();
        Bitmap decoded = BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.length);
        if (decoded != null) {
          image = decoded;
        }
      }
      /*
       * Code to run when the op mode is first enabled goes here
       * @see com.qualcomm.robotcore.eventloop.opmode.OpMode#start()
       */
      @Override
      public void init() {
        // The activity is expected to have opened the camera already (see
        // openFrontFacingCamera in FtcRobotControllerActivity).
        camera = ((FtcRobotControllerActivity)hardwareMap.appContext).camera;
        camera.setPreviewCallback(previewCallback);

        Camera.Parameters parameters = camera.getParameters();
        data = parameters.flatten();

        ((FtcRobotControllerActivity) hardwareMap.appContext).initPreview(camera, this, previewCallback);
      }

      /**
       * Returns the index of the largest of the three channel totals:
       * 0 = red, 1 = green, 2 = blue. Ties resolve to the lower index.
       */
      public int highestColor(int red, int green, int blue) {
        int[] color = {red,green,blue};
        int value = 0;
        for (int i = 1; i < 3; i++) {
          if (color[value] < color[i]) {
            value = i;
          }
        }
        return value;
      }

      /*
       * This method will be called repeatedly in a loop
       * @see com.qualcomm.robotcore.eventloop.opmode.OpMode#loop()
       */
      @Override
      public void loop() {
        convertImage();
        if (image != null) {
          int redValue = 0;
          int blueValue = 0;
          int greenValue = 0;
          // Iterate over the bitmap's own dimensions rather than the shared
          // width/height fields, which a new frame may have changed meanwhile.
          int w = image.getWidth();
          int h = image.getHeight();
          for (int x = 0; x < w; x++) {
            for (int y = 0; y < h; y++) {
              int pixel = image.getPixel(x, y);
              redValue += red(pixel);
              blueValue += blue(pixel);
              greenValue += green(pixel);
            }
          }
          int color = highestColor(redValue, greenValue, blueValue);
          String colorString = "";
          switch (color) {
            case 0:
              colorString = "RED";
              break;
            case 1:
              colorString = "GREEN";
              break;
            case 2:
              colorString = "BLUE";
          }
          telemetry.addData("Color:", "Color detected is: " + colorString);
        }
        // looped is incremented on the camera thread; the count shown here is
        // approximate, which is fine for a heartbeat display.
        telemetry.addData("Looped","Looped " + Integer.toString(looped) + " times");
        Log.d("DEBUG:",data);
      }
    }
    Included in the example is code to detect which color is the most prevalent in the picture.

    Add a class called CameraPreview with this in it

    Code:
    package com.qualcomm.ftcrobotcontroller;
    
    import android.content.Context;
    import android.util.Log;
    import android.view.SurfaceHolder;
    import android.view.SurfaceView;
    import android.hardware.Camera;
    
    import java.io.IOException;
    
    /**
     * Created by FTC-5648 on 9/15/2015.
     */
    /**
     * A {@link SurfaceView} that displays the live camera preview and keeps the
     * supplied {@link Camera.PreviewCallback} registered across surface
     * lifecycle events so frames keep flowing to the op mode.
     */
    public class CameraPreview extends SurfaceView implements SurfaceHolder.Callback {
      private SurfaceHolder mHolder;
      private Camera mCamera;
      private static final String TAG = "DEBUG";
      private Camera.PreviewCallback previewCallback;

      /**
       * @param context         activity hosting the preview
       * @param camera          an already-opened camera; this class does not
       *                        open or release it
       * @param previewCallback callback to receive each preview frame
       */
      public CameraPreview(Context context, Camera camera, Camera.PreviewCallback previewCallback) {
        super(context);
        mCamera = camera;

        this.previewCallback = previewCallback;

        // Install a SurfaceHolder.Callback so we get notified when the
        // underlying surface is created and destroyed.
        mHolder = getHolder();
        mHolder.addCallback(this);
        // deprecated setting, but required on Android versions prior to 3.0
        mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
      }

      public void surfaceCreated(SurfaceHolder holder) {
        // The Surface has been created, now tell the camera where to draw the
        // preview. Catch Exception (not just IOException): startPreview and
        // setPreviewCallback signal failure with RuntimeException.
        try {
          mCamera.setPreviewCallback(previewCallback);
          mCamera.setPreviewDisplay(holder);
          mCamera.startPreview();
        } catch (Exception e) {
          Log.d(TAG, "Error setting camera preview: " + e.getMessage());
        }
      }

      public void surfaceDestroyed(SurfaceHolder holder) {
        // empty. Take care of releasing the Camera preview in your activity.
      }

      public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) {
        // If your preview can change or rotate, take care of those events here.
        // Make sure to stop the preview before resizing or reformatting it.

        if (mHolder.getSurface() == null){
          // preview surface does not exist
          return;
        }

        // stop preview before making changes
        try {
          mCamera.stopPreview();
        } catch (Exception ignored) {
          // ignore: tried to stop a non-existent preview
        }

        // set preview size and make any resize, rotate or
        // reformatting changes here

        // start preview with new settings
        try {
          mCamera.setPreviewCallback(previewCallback);
          mCamera.setPreviewDisplay(mHolder);
          mCamera.startPreview();

        } catch (Exception e){
          Log.d(TAG, "Error starting camera preview: " + e.getMessage());
        }
      }
    }
    Add the following lines to the AndroidManifest.xml file:

    Code:
    <uses-permission android:name="android.permission.CAMERA" />
    
    <uses-feature android:name="android.hardware.camera" />
    <uses-feature android:name="android.hardware.camera.front" />
    Finally, open the activity_ftc_controller.xml layout file and add

    Code:
    <FrameLayout
            android:layout_width="100dp"
            android:layout_height="100dp"
            android:layout_below="@+id/textErrorMessage"
            android:layout_centerHorizontal="true"
            android:id="@+id/previewLayout">
        </FrameLayout>
    to the last RelativeLayout

    This isn't a complete implementation, it is just something quick that I put together in about an hour, for instance if you stop the opmode and restart it the preview breaks; however, the opmode itself still works. I might have forgotten something so if you have trouble getting it to work yourself or have any questions ask.

  • #2
    Thanks for putting this up!

    I'm trying to get it to work, but I'm getting an uncaught exception. In trying to track this down, it looks like openFrontFacingCamera is never called, so the camera is never set up (and thus later references to it are null, resulting in the exception). Where should openFrontFacingCamera be called from?

    Thanks again!

    Comment


    • #3
      I put
      Code:
      camera=openFrontFacingCamera();
      in OnCreate in the activity and it works! Cool! Thanks!

      Comment


      • #4
        I gave this a try - for starters, CameraOp, CameraPreview were undefined. Please suggest how to fix...
        I had to add
        import android.widget.FrameLayout; to deal with related undefineds.
        Thanks,
        Martin Haeberli
        FTC #7593 TigerBots

        Comment


        • #5
          Although the vision processing this year is fairly simple, it might be the case that it will not be in coming years, so OpenCV might be a good idea to use. I'm using it at the moment to cause less strain on my team to transition (as we have a lot of computer scientists but nobody good with build systems/dependency management) after I graduate, as well as because of the fact that I'm also learning it for a different engineering challenge. I'll share some of the snippets (e.g. NV21 YUV to RGB) and dependency structure if anyone is interested.
          FTC6460 mentor (software+computer vision+electronics), FPGA enthusiast. In favor of allowing custom electronics on FTC bots.
          Co-founder of ##ftc live chat for FTC programming--currently you may need to join and wait some time for help--volunteer basis only.

          Comment


          • #6
            Originally posted by hexafraction View Post
            Although the vision processing this year is fairly simple, it might be the case that it will not be in coming years, so OpenCV might be a good idea to use. I'm using it at the moment to cause less strain on my team to transition (as we have a lot of computer scientists but nobody good with build systems/dependency management) after I graduate, as well as because of the fact that I'm also learning it for a different engineering challenge. I'll share some of the snippets (e.g. NV21 YUV to RGB) and dependency structure if anyone is interested.
            Yes, please. I would love to see a similar example to the one at the top of the thread but using OpenCV to see how they differ.

            Comment


            • #7
              Originally posted by FTC_Team5648 View Post
              Below are the instructions for using the phone's built in camera for sensing purposes...
              5648, do you know much about thread safety? I know a little bit, but just enough to get me into trouble.

              I'm curious to know if the image buffering in your example is threadsafe. In particular, yuvImage gets written to in the camera preview callback, but then it gets read from in convertImage which is called in the OpMode loop method. Is there any potential conflict where loop could be interrupted by the callback, overwriting yuvImage in the middle of a conversion? If so, does anybody have suggestions on how to make it threadsafe without stalling loop for too long?

              Comment


              • #8
                I have two implementations I can show with OpenCV, both being in my team's repository. One is a helper class that adds a preview to an existing view (to act as a part of the controller activity): https://github.com/niskyRobotics/ftc...ityHelper.java It hasn't been tested yet in terms of Android-specific stuff on an actual bot (although the OpenCV code itself was tested). Construct an instance passing the controller activity and call attach().

                Alternate is an entire activity in itself. See https://github.com/niskyRobotics/ftc...yActivity.java

                Both were adapted from a previous project of mine. Static functions add callbacks for ALL instances of that class (doesn't seem to be an issue in this case)
                FTC6460 mentor (software+computer vision+electronics), FPGA enthusiast. In favor of allowing custom electronics on FTC bots.
                Co-founder of ##ftc live chat for FTC programming--currently you may need to join and wait some time for help--volunteer basis only.

                Comment


                • #9
                  Originally posted by mhaeberli View Post
                  I gave this a try - for starters, CameraOp, CameraPreview were undefined. Please suggest how to fix...
                  I had to add
                  import android.widget.FrameLayout; to deal with related undefineds.
                  Thanks,
                  Martin Haeberli
                  FTC #7593 TigerBots
                  CameraOp and CameraPreview are classes that you created (using the code provided by 5648). Make sure they're in separate class files and then make sure that they're imported appropriately where needed.

                  Comment


                  • #10
                    Originally posted by mhaeberli View Post
                    I gave this a try - for starters, CameraOp, CameraPreview were undefined. Please suggest how to fix...
                    I had to add
                    import android.widget.FrameLayout; to deal with related undefineds.
                    Thanks,
                    Martin Haeberli
                    FTC #7593 TigerBots
                    CameraOp and CameraPreview are classes that you created (using the code provided by 5648). Make sure they're in separate class files and then make sure that they're imported appropriately where needed.

                    Comment


                    • #11
                      This seems like a fun project. Can anyone provide pros and cons to using this approach in competition, vs one of the recommended color sensors?

                      Comment


                      • #12
                        Originally posted by dlevy View Post
                        This seems like a fun project. Can anyone provide pros an cons to using this approach in competition, vs one of the recommended color sensors?
                        Just my opinions:

                        Pros:
                        No extra cost.
                        Potentially accurate at a greater range. Doesn't need to be right next to the beacon.
                        Could use it for multiple purposes? floor line tracking, finding ramps, finding debris, locking onto churros or bars, etc.??
                        Great way to teach advanced kids about images, RGB, signal processing, etc.

                        Cons:
                        Can take up processing time, possibly slowing loops down.
                        Can be susceptible to lights, reflections, etc.
                        Requires that the robot controller device be mounted in a very specific spot, whereas using other sensors provides flexibility in sensor location.
                        Can only use one, whereas with traditional sensors you could use multiple sensors of different types.
                        Hasn't been an official ruling yet about whether camera use is even allowed, though the rules appear to only prohibit the use of functional "additional electronic... video devices" in RE06. RE01a specifically allows (and requires) the android robot controller device and makes no limitations on the use of its capabilities.

                        Comment


                        • #13
                          Originally posted by Cheer4FTC View Post
                          Just my opinions:

                          Pros:
                          No extra cost.
                          Potentially accurate at a greater range. Doesn't need to be right next to the beacon.
                          Could use it for multiple purposes? floor line tracking, finding ramps, finding debris, locking onto churros or bars, etc.??
                          Great way to teach advanced kids about images, RGB, signal processing, etc.

                          Cons:
                          Can take up processing time, possibly slowing loops down.
                          Can be susceptible to lights, reflections, etc.
                          Requires that the robot controller device be mounted in a very specific spot, whereas using other sensors provides flexibility in sensor location.
                          Can only use one, whereas with traditional sensors you could use multiple sensors of different types.
                          Hasn't been an official ruling yet about whether camera use is even allowed, though the rules appear to only prohibit the use of functional "additional electronic... video devices" in RE06. RE01a specifically allows (and requires) the android robot controller device and makes no limitations on the use of its capabilities.
                          On the issue of processing time: There is a HUGE difference between camera support code executing on the Snapdragon's CPU, versus executing on its attached GPU. The last time I checked, OpenCV has only been targeted toward a family of GPUs made by nVidia, NOT the GPUs integrated with Snapdragon CPUs.

                          We would really have something if OpenCV image processing code could run in the GPU, leaving the Snapdragon CPU to run the rest of the robot code!

                          Comment


                          • #14
                            I believe that Qualcomm has released "FastCV" for their processors. Really advanced teams may be able to do some advanced stuff with this (tracking, etc.):
                            https://developer.qualcomm.com/software/fastcv-sdk

                            For simple things, I've been able to down-sample the images way down to like 80x40 and do color detection, etc., on the smaller image in real time in just a few ms. You'd be surprised what you can do with an image at this size. Using high res at high frame rates may require FastCV or something similar.

                            I'm also thinking of releasing something like "OpModeCameraVideo" and "OpModelCameraStill" that teams can extend like they extend normal OpModes, but which has the camera management taken care of. Then teams could just grab the most recent image (in the Video mode) or take a single still photo (in the Still mode) and do whatever processing they want on it the pixels without having to understand Android camera management, callbacks, etc. Thoughts?

                            Comment


                            • #15
                              Originally posted by Cheer4FTC View Post
                              For simple things, I've been able to down-sample the images way down to like 80x40 and do color detection, etc., on the smaller image in real time in just a few ms.
                              I'm not sure how you're doing the down-sampling but it helps considerably to tell the camera to shoot at a lower resolution to start with:

                              Code:
                              Camera.Parameters p =  mCamera.getParameters();
                              p.setPictureSize(640, 480);

                              Comment

                              Working...
                              X